// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <[email protected]>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

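/*
 * klp_ops holds one entry per function that is currently patched.  Each
 * entry owns the ftrace_ops registered for that function and a stack
 * (func_stack) of klp_funcs from every patch, old and new, that patches it.
 */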
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}
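
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * debugging helper showing how the structures above fit together.  It
 * walks the same func_stack that klp_find_ops() inspects; a caller would
 * need to hold klp_mutex so the plain (non-RCU) list walk is safe.
 */
static void __maybe_unused klp_dump_func_stack(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = klp_find_ops(old_func);
	if (!ops) {
		pr_info("%pS is not patched\n", old_func);
		return;
	}

	/* Newest patch first: klp_patch_func() adds at the list head. */
	list_for_each_entry(func, &ops->func_stack, stack_node)
		pr_info("%s is patched by %pS (nop=%d)\n",
			func->old_name, func->new_func, func->nop);
}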

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}
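
/*
 * For reference: on x86 in this era, klp_arch_set_pc() (from
 * arch/x86/include/asm/livepatch.h) simply rewrote the saved instruction
 * pointer, so that when ftrace returns, execution resumes in the
 * replacement function rather than the original:
 *
 *	static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 *	{
 *		regs->ip = ip;
 *	}
 */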

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
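
/*
 * Example of such a custom behaviour (powerpc64 with -mprofile-kernel,
 * from arch/powerpc/include/asm/livepatch.h of roughly the same era):
 * the ftrace call site is not at the function's entry address, so the
 * override searches the first 16 bytes for it:
 *
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */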

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
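
/*
 * Note on the teardown order above: when the last klp_func is removed,
 * unregister_ftrace_function() runs first and includes the equivalent of
 * synchronize_rcu() (see the comment in klp_ftrace_handler()), so no CPU
 * can still be walking func_stack by the time the entry is unlinked and
 * the klp_ops is freed.
 */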

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}
	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
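
/*
 * Illustrative sketch, not part of the original file: the minimal ftrace
 * setup that klp_patch_func() performs for a newly patched function,
 * written as a stand-alone fragment.  It relies on the <linux/ftrace.h>
 * include above; my_ops, my_handler and 'target' are hypothetical names,
 * and FTRACE_OPS_FL_PERMANENT is omitted for brevity.
 */
static void notrace my_handler(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *fops, struct pt_regs *regs)
{
	/* A real handler would redirect execution via klp_arch_set_pc(). */
}

static struct ftrace_ops my_ops = {
	.func	= my_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_DYNAMIC |
		  FTRACE_OPS_FL_IPMODIFY,
};

static int __maybe_unused my_patch_one(unsigned long target)
{
	int ret;

	/* Restrict the ops to the one target address first... */
	ret = ftrace_set_filter_ip(&my_ops, target, 0, 0);
	if (ret)
		return ret;

	/*
	 * ...then register it.  Doing this in the opposite order would
	 * briefly attach the handler to every traceable function.
	 */
	ret = register_ftrace_function(&my_ops);
	if (ret)
		ftrace_set_filter_ip(&my_ops, target, 1, 0);
	return ret;
}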

static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}
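
/*
 * Note: obj->patched intentionally stays set when only nops are removed
 * from a regular object, because its non-nop functions are still live.
 * Dynamic objects consist solely of nops, so they become fully unpatched
 * either way.
 */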

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}
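
/*
 * Illustrative sketch, not part of the original file: roughly how the
 * enable path in core.c drives klp_patch_object() (simplified; callback
 * handling and locking are omitted, and klp_is_object_loaded() is
 * private to core.c).
 */
static int __maybe_unused example_enable(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	klp_for_each_object(patch, obj) {
		/* Not-yet-loaded modules are patched later, at load time. */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_patch_object(obj);
		if (ret) {
			/* Roll back every object patched so far. */
			klp_unpatch_objects(patch);
			return ret;
		}
	}

	return 0;
}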

static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}
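
/*
 * Note: klp_unpatch_objects_dynamic() serves the atomic-replace case.  A
 * replacement patch carries dynamically generated nop functions for code
 * that older patches modified but the new patch does not; once the
 * transition completes, only those nops are unpatched, so the affected
 * functions return to their original code without any ftrace detour.
 */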