xref: /linux-6.15/arch/powerpc/lib/code-patching.c (revision bbffdd2f)
12874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2aaddd3eaSMichael Ellerman /*
3aaddd3eaSMichael Ellerman  *  Copyright 2008 Michael Ellerman, IBM Corporation.
4aaddd3eaSMichael Ellerman  */
5aaddd3eaSMichael Ellerman 
671f6e58eSNaveen N. Rao #include <linux/kprobes.h>
7ae0dc736SMichael Ellerman #include <linux/vmalloc.h>
8ae0dc736SMichael Ellerman #include <linux/init.h>
937bc3e5fSBalbir Singh #include <linux/cpuhotplug.h>
107c0f6ba6SLinus Torvalds #include <linux/uaccess.h>
11b0337678SChristophe Leroy #include <linux/jump_label.h>
12aaddd3eaSMichael Ellerman 
1337bc3e5fSBalbir Singh #include <asm/tlbflush.h>
1437bc3e5fSBalbir Singh #include <asm/page.h>
1537bc3e5fSBalbir Singh #include <asm/code-patching.h>
1675346251SJordan Niethe #include <asm/inst.h>
17aaddd3eaSMichael Ellerman 
/*
 * Write @instr at @patch_addr, then flush/invalidate the caches so the
 * CPU will fetch the new instruction when executing at @exec_addr.
 *
 * @exec_addr:  address the CPU will execute the instruction from
 * @patch_addr: (possibly aliased) writable address to store through
 *
 * Returns 0 on success, -EPERM if the store faults.
 */
static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		/* Fault-tolerant store: jumps to 'failed' on a write fault */
		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		/* Prefixed instructions are two words, stored as one u64 */
		u64 val = ppc_inst_as_ulong(instr);

		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	/*
	 * Push the store out of the data cache (dcbst via the write alias)
	 * and invalidate the icache line for the execution address, so the
	 * next fetch sees the new instruction.
	 */
	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	return -EPERM;
}
38aaddd3eaSMichael Ellerman 
39c545b9f0SChristophe Leroy int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
408cf4c057SChristophe Leroy {
418cf4c057SChristophe Leroy 	return __patch_instruction(addr, instr, addr);
428cf4c057SChristophe Leroy }
438cf4c057SChristophe Leroy 
4437bc3e5fSBalbir Singh #ifdef CONFIG_STRICT_KERNEL_RWX
4537bc3e5fSBalbir Singh static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
4637bc3e5fSBalbir Singh 
47591b4b26SMichael Ellerman static int map_patch_area(void *addr, unsigned long text_poke_addr);
48591b4b26SMichael Ellerman static void unmap_patch_area(unsigned long addr);
49591b4b26SMichael Ellerman 
5037bc3e5fSBalbir Singh static int text_area_cpu_up(unsigned int cpu)
5137bc3e5fSBalbir Singh {
5237bc3e5fSBalbir Singh 	struct vm_struct *area;
53591b4b26SMichael Ellerman 	unsigned long addr;
54591b4b26SMichael Ellerman 	int err;
5537bc3e5fSBalbir Singh 
5637bc3e5fSBalbir Singh 	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
5737bc3e5fSBalbir Singh 	if (!area) {
5837bc3e5fSBalbir Singh 		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
5937bc3e5fSBalbir Singh 			cpu);
6037bc3e5fSBalbir Singh 		return -1;
6137bc3e5fSBalbir Singh 	}
62591b4b26SMichael Ellerman 
63591b4b26SMichael Ellerman 	// Map/unmap the area to ensure all page tables are pre-allocated
64591b4b26SMichael Ellerman 	addr = (unsigned long)area->addr;
65591b4b26SMichael Ellerman 	err = map_patch_area(empty_zero_page, addr);
66591b4b26SMichael Ellerman 	if (err)
67591b4b26SMichael Ellerman 		return err;
68591b4b26SMichael Ellerman 
69591b4b26SMichael Ellerman 	unmap_patch_area(addr);
70591b4b26SMichael Ellerman 
7137bc3e5fSBalbir Singh 	this_cpu_write(text_poke_area, area);
7237bc3e5fSBalbir Singh 
7337bc3e5fSBalbir Singh 	return 0;
7437bc3e5fSBalbir Singh }
7537bc3e5fSBalbir Singh 
7637bc3e5fSBalbir Singh static int text_area_cpu_down(unsigned int cpu)
7737bc3e5fSBalbir Singh {
7837bc3e5fSBalbir Singh 	free_vm_area(this_cpu_read(text_poke_area));
7937bc3e5fSBalbir Singh 	return 0;
8037bc3e5fSBalbir Singh }
8137bc3e5fSBalbir Singh 
8217512892SChristophe Leroy static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
8317512892SChristophe Leroy 
8437bc3e5fSBalbir Singh /*
8571a5b3dbSJordan Niethe  * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
8671a5b3dbSJordan Niethe  * we judge it as being preferable to a kernel that will crash later when
8771a5b3dbSJordan Niethe  * someone tries to use patch_instruction().
8837bc3e5fSBalbir Singh  */
8971a5b3dbSJordan Niethe void __init poking_init(void)
9037bc3e5fSBalbir Singh {
9137bc3e5fSBalbir Singh 	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
9237bc3e5fSBalbir Singh 		"powerpc/text_poke:online", text_area_cpu_up,
9337bc3e5fSBalbir Singh 		text_area_cpu_down));
9417512892SChristophe Leroy 	static_branch_enable(&poking_init_done);
9537bc3e5fSBalbir Singh }
9637bc3e5fSBalbir Singh 
9737bc3e5fSBalbir Singh /*
9837bc3e5fSBalbir Singh  * This can be called for kernel text or a module.
9937bc3e5fSBalbir Singh  */
10037bc3e5fSBalbir Singh static int map_patch_area(void *addr, unsigned long text_poke_addr)
10137bc3e5fSBalbir Singh {
10237bc3e5fSBalbir Singh 	unsigned long pfn;
10337bc3e5fSBalbir Singh 
104cb3ac452SChristophe Leroy 	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
10537bc3e5fSBalbir Singh 		pfn = vmalloc_to_pfn(addr);
10637bc3e5fSBalbir Singh 	else
10737bc3e5fSBalbir Singh 		pfn = __pa_symbol(addr) >> PAGE_SHIFT;
10837bc3e5fSBalbir Singh 
109285672f9SChristophe Leroy 	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
11037bc3e5fSBalbir Singh }
11137bc3e5fSBalbir Singh 
/*
 * Tear down the temporary PTE at @addr that map_patch_area() installed,
 * then flush the TLB for that page.
 *
 * Every intermediate page-table level is expected to be present, since
 * map_patch_area() populated them; a missing level indicates a bug,
 * hence the WARN_ON()s (which also bail out safely).
 */
static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear flushes the tlb; in radix it does not, so
	 * do the explicit range flush unconditionally to cover both.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
14637bc3e5fSBalbir Singh 
1476b21af74SChristophe Leroy static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
1486b21af74SChristophe Leroy {
1496b21af74SChristophe Leroy 	int err;
1506b21af74SChristophe Leroy 	u32 *patch_addr;
1516b21af74SChristophe Leroy 	unsigned long text_poke_addr;
1526b21af74SChristophe Leroy 
1536b21af74SChristophe Leroy 	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
1546b21af74SChristophe Leroy 	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
1556b21af74SChristophe Leroy 
1566b21af74SChristophe Leroy 	err = map_patch_area(addr, text_poke_addr);
1576b21af74SChristophe Leroy 	if (err)
1586b21af74SChristophe Leroy 		return err;
1596b21af74SChristophe Leroy 
1606b21af74SChristophe Leroy 	err = __patch_instruction(addr, instr, patch_addr);
1616b21af74SChristophe Leroy 
1626b21af74SChristophe Leroy 	unmap_patch_area(text_poke_addr);
1636b21af74SChristophe Leroy 
1646b21af74SChristophe Leroy 	return err;
1656b21af74SChristophe Leroy }
1666b21af74SChristophe Leroy 
167c545b9f0SChristophe Leroy static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
16837bc3e5fSBalbir Singh {
16937bc3e5fSBalbir Singh 	int err;
17037bc3e5fSBalbir Singh 	unsigned long flags;
17137bc3e5fSBalbir Singh 
17237bc3e5fSBalbir Singh 	/*
17337bc3e5fSBalbir Singh 	 * During early early boot patch_instruction is called
17437bc3e5fSBalbir Singh 	 * when text_poke_area is not ready, but we still need
17537bc3e5fSBalbir Singh 	 * to allow patching. We just do the plain old patching
17637bc3e5fSBalbir Singh 	 */
17717512892SChristophe Leroy 	if (!static_branch_likely(&poking_init_done))
1788cf4c057SChristophe Leroy 		return raw_patch_instruction(addr, instr);
17937bc3e5fSBalbir Singh 
18037bc3e5fSBalbir Singh 	local_irq_save(flags);
1816b21af74SChristophe Leroy 	err = __do_patch_instruction(addr, instr);
18237bc3e5fSBalbir Singh 	local_irq_restore(flags);
18337bc3e5fSBalbir Singh 
18437bc3e5fSBalbir Singh 	return err;
18537bc3e5fSBalbir Singh }
18637bc3e5fSBalbir Singh #else /* !CONFIG_STRICT_KERNEL_RWX */
18737bc3e5fSBalbir Singh 
188c545b9f0SChristophe Leroy static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
18937bc3e5fSBalbir Singh {
1908cf4c057SChristophe Leroy 	return raw_patch_instruction(addr, instr);
19137bc3e5fSBalbir Singh }
19237bc3e5fSBalbir Singh 
19337bc3e5fSBalbir Singh #endif /* CONFIG_STRICT_KERNEL_RWX */
194b45ba4a5SChristophe Leroy 
195b0337678SChristophe Leroy __ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);
196b0337678SChristophe Leroy 
197c545b9f0SChristophe Leroy int patch_instruction(u32 *addr, ppc_inst_t instr)
198b45ba4a5SChristophe Leroy {
199b45ba4a5SChristophe Leroy 	/* Make sure we aren't patching a freed init section */
200b0337678SChristophe Leroy 	if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
201b45ba4a5SChristophe Leroy 		return 0;
202edecd2d6SChristophe Leroy 
203b45ba4a5SChristophe Leroy 	return do_patch_instruction(addr, instr);
204b45ba4a5SChristophe Leroy }
20537bc3e5fSBalbir Singh NOKPROBE_SYMBOL(patch_instruction);
20637bc3e5fSBalbir Singh 
20769d4d6e5SChristophe Leroy int patch_branch(u32 *addr, unsigned long target, int flags)
208e7a57273SMichael Ellerman {
209c545b9f0SChristophe Leroy 	ppc_inst_t instr;
2107c95d889SJordan Niethe 
211d5937db1SChristophe Leroy 	if (create_branch(&instr, addr, target, flags))
212d5937db1SChristophe Leroy 		return -ERANGE;
213d5937db1SChristophe Leroy 
2147c95d889SJordan Niethe 	return patch_instruction(addr, instr);
215e7a57273SMichael Ellerman }
216e7a57273SMichael Ellerman 
21751c9c084SAnju T /*
21851c9c084SAnju T  * Helper to check if a given instruction is a conditional branch
21951c9c084SAnju T  * Derived from the conditional checks in analyse_instr()
22051c9c084SAnju T  */
221c545b9f0SChristophe Leroy bool is_conditional_branch(ppc_inst_t instr)
22251c9c084SAnju T {
2238094892dSJordan Niethe 	unsigned int opcode = ppc_inst_primary_opcode(instr);
22451c9c084SAnju T 
22551c9c084SAnju T 	if (opcode == 16)       /* bc, bca, bcl, bcla */
22651c9c084SAnju T 		return true;
22751c9c084SAnju T 	if (opcode == 19) {
228777e26f0SJordan Niethe 		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
22951c9c084SAnju T 		case 16:        /* bclr, bclrl */
23051c9c084SAnju T 		case 528:       /* bcctr, bcctrl */
23151c9c084SAnju T 		case 560:       /* bctar, bctarl */
23251c9c084SAnju T 			return true;
23351c9c084SAnju T 		}
23451c9c084SAnju T 	}
23551c9c084SAnju T 	return false;
23651c9c084SAnju T }
23771f6e58eSNaveen N. Rao NOKPROBE_SYMBOL(is_conditional_branch);
23851c9c084SAnju T 
239c545b9f0SChristophe Leroy int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
240411781a2SMichael Ellerman 		       unsigned long target, int flags)
241411781a2SMichael Ellerman {
242411781a2SMichael Ellerman 	long offset;
243411781a2SMichael Ellerman 
244411781a2SMichael Ellerman 	offset = target;
245411781a2SMichael Ellerman 	if (! (flags & BRANCH_ABSOLUTE))
246411781a2SMichael Ellerman 		offset = offset - (unsigned long)addr;
247411781a2SMichael Ellerman 
248411781a2SMichael Ellerman 	/* Check we can represent the target in the instruction format */
2494549c3eaSNaveen N. Rao 	if (!is_offset_in_cond_branch_range(offset))
2507c95d889SJordan Niethe 		return 1;
251411781a2SMichael Ellerman 
252411781a2SMichael Ellerman 	/* Mask out the flags and target, so they don't step on each other. */
25394afd069SJordan Niethe 	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));
254411781a2SMichael Ellerman 
2557c95d889SJordan Niethe 	return 0;
256411781a2SMichael Ellerman }
257411781a2SMichael Ellerman 
258c545b9f0SChristophe Leroy int instr_is_relative_branch(ppc_inst_t instr)
259411781a2SMichael Ellerman {
260777e26f0SJordan Niethe 	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
261411781a2SMichael Ellerman 		return 0;
262411781a2SMichael Ellerman 
263411781a2SMichael Ellerman 	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
264411781a2SMichael Ellerman }
265411781a2SMichael Ellerman 
266c545b9f0SChristophe Leroy int instr_is_relative_link_branch(ppc_inst_t instr)
267b9eab08dSJosh Poimboeuf {
268777e26f0SJordan Niethe 	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
269b9eab08dSJosh Poimboeuf }
270b9eab08dSJosh Poimboeuf 
27169d4d6e5SChristophe Leroy static unsigned long branch_iform_target(const u32 *instr)
272411781a2SMichael Ellerman {
273411781a2SMichael Ellerman 	signed long imm;
274411781a2SMichael Ellerman 
27518c85964SChristophe Leroy 	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;
276411781a2SMichael Ellerman 
277411781a2SMichael Ellerman 	/* If the top bit of the immediate value is set this is negative */
278411781a2SMichael Ellerman 	if (imm & 0x2000000)
279411781a2SMichael Ellerman 		imm -= 0x4000000;
280411781a2SMichael Ellerman 
28118c85964SChristophe Leroy 	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
282411781a2SMichael Ellerman 		imm += (unsigned long)instr;
283411781a2SMichael Ellerman 
284411781a2SMichael Ellerman 	return (unsigned long)imm;
285411781a2SMichael Ellerman }
286411781a2SMichael Ellerman 
28769d4d6e5SChristophe Leroy static unsigned long branch_bform_target(const u32 *instr)
288411781a2SMichael Ellerman {
289411781a2SMichael Ellerman 	signed long imm;
290411781a2SMichael Ellerman 
29118c85964SChristophe Leroy 	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;
292411781a2SMichael Ellerman 
293411781a2SMichael Ellerman 	/* If the top bit of the immediate value is set this is negative */
294411781a2SMichael Ellerman 	if (imm & 0x8000)
295411781a2SMichael Ellerman 		imm -= 0x10000;
296411781a2SMichael Ellerman 
29718c85964SChristophe Leroy 	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
298411781a2SMichael Ellerman 		imm += (unsigned long)instr;
299411781a2SMichael Ellerman 
300411781a2SMichael Ellerman 	return (unsigned long)imm;
301411781a2SMichael Ellerman }
302411781a2SMichael Ellerman 
30369d4d6e5SChristophe Leroy unsigned long branch_target(const u32 *instr)
304411781a2SMichael Ellerman {
305f8faaffaSJordan Niethe 	if (instr_is_branch_iform(ppc_inst_read(instr)))
306411781a2SMichael Ellerman 		return branch_iform_target(instr);
307f8faaffaSJordan Niethe 	else if (instr_is_branch_bform(ppc_inst_read(instr)))
308411781a2SMichael Ellerman 		return branch_bform_target(instr);
309411781a2SMichael Ellerman 
310411781a2SMichael Ellerman 	return 0;
311411781a2SMichael Ellerman }
312411781a2SMichael Ellerman 
313c545b9f0SChristophe Leroy int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
314411781a2SMichael Ellerman {
315411781a2SMichael Ellerman 	unsigned long target;
316411781a2SMichael Ellerman 	target = branch_target(src);
317411781a2SMichael Ellerman 
318f8faaffaSJordan Niethe 	if (instr_is_branch_iform(ppc_inst_read(src)))
319f8faaffaSJordan Niethe 		return create_branch(instr, dest, target,
320f8faaffaSJordan Niethe 				     ppc_inst_val(ppc_inst_read(src)));
321f8faaffaSJordan Niethe 	else if (instr_is_branch_bform(ppc_inst_read(src)))
322f8faaffaSJordan Niethe 		return create_cond_branch(instr, dest, target,
323f8faaffaSJordan Niethe 					  ppc_inst_val(ppc_inst_read(src)));
324411781a2SMichael Ellerman 
3257c95d889SJordan Niethe 	return 1;
326411781a2SMichael Ellerman }
327