/* /linux-6.15/arch/arm64/kernel/patching.c (revision 5f154c4e) */
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stop_machine.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/sections.h>

/* Serialises users of the text-poke fixmap slot (FIX_TEXT_POKE0). */
static DEFINE_RAW_SPINLOCK(patch_lock);

static bool is_exit_text(unsigned long addr)
{
	/* discarded with init text/data */
	return system_state < SYSTEM_RUNNING &&
		addr >= (unsigned long)__exittext_begin &&
		addr < (unsigned long)__exittext_end;
}

static bool is_image_text(unsigned long addr)
{
	return core_kernel_text(addr) || is_exit_text(addr);
}

/*
 * Kernel image text is mapped read-only, so patching writes go through a
 * writable alias installed in a text-poke fixmap slot. Module text needs
 * the same treatment only when CONFIG_STRICT_MODULE_RWX makes it
 * read-only; otherwise it can be written in place.
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool image = is_image_text(uintaddr);
	struct page *page;

	if (image)
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}

static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}

/*
 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 * little-endian.
 */
int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
{
	int ret;
	__le32 val;

	ret = copy_from_kernel_nofault(&val, addr, AARCH64_INSN_SIZE);
	if (!ret)
		*insnp = le32_to_cpu(val);

	return ret;
}
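
/*
 * Example: reading back an instruction. An illustrative sketch only;
 * example_dump_insn() is a hypothetical helper, not a kernel interface.
 * aarch64_insn_read() hands back a CPU-endian u32 via
 * copy_from_kernel_nofault(), so the caller neither faults on a bad
 * address nor deals with the little-endian in-memory form.
 */
static int __maybe_unused example_dump_insn(void *addr)
{
	u32 insn;
	int ret;

	ret = aarch64_insn_read(addr, &insn);
	if (ret)
		return ret;

	pr_info("insn at %px: %#010x\n", addr, insn);
	return 0;
}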

/*
 * Write one little-endian instruction word through the fixmap alias. The
 * raw spinlock with interrupts disabled serialises use of FIX_TEXT_POKE0.
 */
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}

int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}

int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
{
	u32 *tp = addr;
	int ret;

	/* A64 instructions must be word aligned */
	if ((uintptr_t)tp & 0x3)
		return -EINVAL;

	ret = aarch64_insn_write(tp, insn);
	if (ret == 0)
		__flush_icache_range((uintptr_t)tp,
				     (uintptr_t)tp + AARCH64_INSN_SIZE);

	return ret;
}
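
/*
 * Example: NOP-ing out a single instruction. An illustrative sketch;
 * example_nop_out() is a hypothetical helper. The _nosync variant does no
 * cross-CPU serialisation, so it is only safe while no other CPU can be
 * executing the patched address, or when both the old and new instructions
 * fall within the architecture's safe set for concurrent modification
 * (e.g. B, BL, NOP, BRK).
 */
static int __maybe_unused example_nop_out(void *addr)
{
	return aarch64_insn_patch_text_nosync(addr,
			aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP));
}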

struct aarch64_insn_patch {
	void		**text_addrs;
	u32		*new_insns;
	int		insn_cnt;
	atomic_t	cpu_count;
};

static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		/* Discard any stale, pre-patch instruction fetches. */
		isb();
	}

	return ret;
}

int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
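
/*
 * Example: patching a batch of live call sites from process context. An
 * illustrative sketch; example_patch_two_nops() is a hypothetical helper,
 * though arm64 kprobes drives this interface the same way when planting
 * BRK instructions. stop_machine() parks every online CPU, so the batch
 * is atomic with respect to concurrent execution. Since the function ends
 * in stop_machine_cpuslocked(), callers are expected to hold the CPU
 * hotplug read lock (cpus_read_lock()).
 */
static int __maybe_unused example_patch_two_nops(void *site0, void *site1)
{
	void *addrs[] = { site0, site1 };
	u32 insns[] = {
		aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP),
		aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP),
	};

	return aarch64_insn_patch_text(addrs, insns, ARRAY_SIZE(addrs));
}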