// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <[email protected]>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "core.h"
#include "patch.h"
#include "transition.h"

static LIST_HEAD(klp_ops);

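/*
 * Find the klp_ops struct whose func_stack is patching @old_func.  Only the
 * first (most recent) entry of each func_stack needs to be checked, because
 * all functions on a given stack patch the same original function.
 */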
struct klp_ops *klp_find_ops(void *old_func)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_func == old_func)
			return ops;
	}

	return NULL;
}

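/*
 * The ftrace handler for patched functions.  Called on entry to every patched
 * function; it picks the most recent klp_func on the func_stack (taking any
 * in-progress transition into account) and redirects execution to it.
 */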
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct ftrace_regs *fregs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;
	int bit;

	ops = container_of(fops, struct klp_ops, fops);

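	/*
	 * Guard against ftrace handler recursion: bail out if this context
	 * is already running inside an ftrace callback.
	 */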
	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (WARN_ON_ONCE(bit < 0))
		return;
	/*
	 * A variant of synchronize_rcu() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_rcu() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	/*
	 * NOPs are used to replace existing patches with original code.
	 * Do nothing! Setting pc would cause an infinite loop.
	 */
	if (func->nop)
		goto unlock;

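	/* Redirect execution: resume in the new function instead of the old. */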
	klp_arch_set_pc(fregs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
	ftrace_test_recursion_unlock(bit);
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated, so they are allowed to provide a custom
 * implementation.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

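/*
 * Remove @func from its func_stack.  If it was the only function patching
 * this location, unregister the ftrace handler and free the klp_ops; the
 * original function then runs unpatched again.
 */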
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_func))
		return;

	ops = klp_find_ops(func->old_func);
	if (WARN_ON(!ops))
		return;

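	/*
	 * The last klp_func on the stack is being removed: tear down the
	 * ftrace ops entirely.  Otherwise just drop the func and keep the
	 * handler pointing at the remaining stack.
	 */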
	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

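/*
 * Redirect @func->old_func to @func->new_func.  The first patch of a given
 * function allocates a klp_ops and registers the ftrace handler; later
 * patches of the same function just push onto the existing func_stack.
 */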
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_func))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_func);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc =
			klp_get_ftrace_location((unsigned long)func->old_func);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
				func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

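		/*
		 * SAVE_REGS is only needed to change the ip when the arch
		 * lacks DYNAMIC_FTRACE_WITH_ARGS.  IPMODIFY marks this ops as
		 * one that rewrites the ip, and PERMANENT keeps it registered
		 * even when ftrace is toggled via the ftrace_enabled sysctl.
		 */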
		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_DYNAMIC |
#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
				  FTRACE_OPS_FL_SAVE_REGS |
#endif
				  FTRACE_OPS_FL_IPMODIFY |
				  FTRACE_OPS_FL_PERMANENT;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

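		/*
		 * Filter the ops down to just this function's ftrace
		 * location before registering, so enabling the handler
		 * affects only the function being patched.
		 */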
		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

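/*
 * Unpatch every patched function in @obj.  With @nops_only set, only the
 * dynamically added NOP functions are unpatched (used when an atomic-replace
 * patch discards NOPs it no longer needs); a regular object then stays
 * marked as patched.
 */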
static void __klp_unpatch_object(struct klp_object *obj, bool nops_only)
{
	struct klp_func *func;

	klp_for_each_func(obj, func) {
		if (nops_only && !func->nop)
			continue;

		if (func->patched)
			klp_unpatch_func(func);
	}

	if (obj->dynamic || !nops_only)
		obj->patched = false;
}

void klp_unpatch_object(struct klp_object *obj)
{
	__klp_unpatch_object(obj, false);
}

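/*
 * Patch every function in @obj.  On the first failure, roll back any
 * functions already patched so the object is left untouched.
 */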
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}

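/* Unpatch all patched objects of @patch, optionally limited to NOPs. */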
static void __klp_unpatch_objects(struct klp_patch *patch, bool nops_only)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			__klp_unpatch_object(obj, nops_only);
}

void klp_unpatch_objects(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, false);
}

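/*
 * Unpatch only the dynamically allocated NOP functions; called once an
 * atomic-replace patch has finished its transition and the NOPs are no
 * longer needed.
 */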
void klp_unpatch_objects_dynamic(struct klp_patch *patch)
{
	__klp_unpatch_objects(patch, true);
}