xref: /linux-6.15/kernel/livepatch/patch.c (revision d83a7cb3)
1c349cdcaSJosh Poimboeuf /*
2c349cdcaSJosh Poimboeuf  * patch.c - livepatch patching functions
3c349cdcaSJosh Poimboeuf  *
4c349cdcaSJosh Poimboeuf  * Copyright (C) 2014 Seth Jennings <[email protected]>
5c349cdcaSJosh Poimboeuf  * Copyright (C) 2014 SUSE
6c349cdcaSJosh Poimboeuf  * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
7c349cdcaSJosh Poimboeuf  *
8c349cdcaSJosh Poimboeuf  * This program is free software; you can redistribute it and/or
9c349cdcaSJosh Poimboeuf  * modify it under the terms of the GNU General Public License
10c349cdcaSJosh Poimboeuf  * as published by the Free Software Foundation; either version 2
11c349cdcaSJosh Poimboeuf  * of the License, or (at your option) any later version.
12c349cdcaSJosh Poimboeuf  *
13c349cdcaSJosh Poimboeuf  * This program is distributed in the hope that it will be useful,
14c349cdcaSJosh Poimboeuf  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15c349cdcaSJosh Poimboeuf  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16c349cdcaSJosh Poimboeuf  * GNU General Public License for more details.
17c349cdcaSJosh Poimboeuf  *
18c349cdcaSJosh Poimboeuf  * You should have received a copy of the GNU General Public License
19c349cdcaSJosh Poimboeuf  * along with this program; if not, see <http://www.gnu.org/licenses/>.
20c349cdcaSJosh Poimboeuf  */
21c349cdcaSJosh Poimboeuf 
22c349cdcaSJosh Poimboeuf #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23c349cdcaSJosh Poimboeuf 
24c349cdcaSJosh Poimboeuf #include <linux/livepatch.h>
25c349cdcaSJosh Poimboeuf #include <linux/list.h>
26c349cdcaSJosh Poimboeuf #include <linux/ftrace.h>
27c349cdcaSJosh Poimboeuf #include <linux/rculist.h>
28c349cdcaSJosh Poimboeuf #include <linux/slab.h>
29c349cdcaSJosh Poimboeuf #include <linux/bug.h>
30c349cdcaSJosh Poimboeuf #include <linux/printk.h>
31c349cdcaSJosh Poimboeuf #include "patch.h"
32*d83a7cb3SJosh Poimboeuf #include "transition.h"
33c349cdcaSJosh Poimboeuf 
34c349cdcaSJosh Poimboeuf static LIST_HEAD(klp_ops);
35c349cdcaSJosh Poimboeuf 
36c349cdcaSJosh Poimboeuf struct klp_ops *klp_find_ops(unsigned long old_addr)
37c349cdcaSJosh Poimboeuf {
38c349cdcaSJosh Poimboeuf 	struct klp_ops *ops;
39c349cdcaSJosh Poimboeuf 	struct klp_func *func;
40c349cdcaSJosh Poimboeuf 
41c349cdcaSJosh Poimboeuf 	list_for_each_entry(ops, &klp_ops, node) {
42c349cdcaSJosh Poimboeuf 		func = list_first_entry(&ops->func_stack, struct klp_func,
43c349cdcaSJosh Poimboeuf 					stack_node);
44c349cdcaSJosh Poimboeuf 		if (func->old_addr == old_addr)
45c349cdcaSJosh Poimboeuf 			return ops;
46c349cdcaSJosh Poimboeuf 	}
47c349cdcaSJosh Poimboeuf 
48c349cdcaSJosh Poimboeuf 	return NULL;
49c349cdcaSJosh Poimboeuf }
50c349cdcaSJosh Poimboeuf 
/*
 * ftrace handler installed for every patched function.
 *
 * Normally redirects execution to the newest klp_func on this ops'
 * func_stack.  While a patch transition is in progress
 * (func->transition set) and the current task is still in the
 * KLP_UNPATCHED state, it instead redirects to the previously patched
 * version — or leaves the original function untouched if no previous
 * patch exists.  The smp_rmb() pairing below is what makes those two
 * reads (stack top vs. transition/patch_state) safe; see the inline
 * comments.
 */
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads.  The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path.  In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered.  But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads.  Otherwise we could read an
		 * out-of-date task state and pick the wrong function.  The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			/* Wrapped around to the list head: no previous patch. */
			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
120c349cdcaSJosh Poimboeuf 
/*
 * Map a function address to its ftrace call-site location.
 *
 * On most architectures the ftrace location is simply the function's
 * entry address, so this fallback is the identity function.
 * Architectures with a different layout override it by defining the
 * klp_get_ftrace_location macro.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long func_addr)
{
	return func_addr;
}
#endif
133c349cdcaSJosh Poimboeuf 
134c349cdcaSJosh Poimboeuf static void klp_unpatch_func(struct klp_func *func)
135c349cdcaSJosh Poimboeuf {
136c349cdcaSJosh Poimboeuf 	struct klp_ops *ops;
137c349cdcaSJosh Poimboeuf 
138c349cdcaSJosh Poimboeuf 	if (WARN_ON(!func->patched))
139c349cdcaSJosh Poimboeuf 		return;
140c349cdcaSJosh Poimboeuf 	if (WARN_ON(!func->old_addr))
141c349cdcaSJosh Poimboeuf 		return;
142c349cdcaSJosh Poimboeuf 
143c349cdcaSJosh Poimboeuf 	ops = klp_find_ops(func->old_addr);
144c349cdcaSJosh Poimboeuf 	if (WARN_ON(!ops))
145c349cdcaSJosh Poimboeuf 		return;
146c349cdcaSJosh Poimboeuf 
147c349cdcaSJosh Poimboeuf 	if (list_is_singular(&ops->func_stack)) {
148c349cdcaSJosh Poimboeuf 		unsigned long ftrace_loc;
149c349cdcaSJosh Poimboeuf 
150c349cdcaSJosh Poimboeuf 		ftrace_loc = klp_get_ftrace_location(func->old_addr);
151c349cdcaSJosh Poimboeuf 		if (WARN_ON(!ftrace_loc))
152c349cdcaSJosh Poimboeuf 			return;
153c349cdcaSJosh Poimboeuf 
154c349cdcaSJosh Poimboeuf 		WARN_ON(unregister_ftrace_function(&ops->fops));
155c349cdcaSJosh Poimboeuf 		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));
156c349cdcaSJosh Poimboeuf 
157c349cdcaSJosh Poimboeuf 		list_del_rcu(&func->stack_node);
158c349cdcaSJosh Poimboeuf 		list_del(&ops->node);
159c349cdcaSJosh Poimboeuf 		kfree(ops);
160c349cdcaSJosh Poimboeuf 	} else {
161c349cdcaSJosh Poimboeuf 		list_del_rcu(&func->stack_node);
162c349cdcaSJosh Poimboeuf 	}
163c349cdcaSJosh Poimboeuf 
164c349cdcaSJosh Poimboeuf 	func->patched = false;
165c349cdcaSJosh Poimboeuf }
166c349cdcaSJosh Poimboeuf 
167c349cdcaSJosh Poimboeuf static int klp_patch_func(struct klp_func *func)
168c349cdcaSJosh Poimboeuf {
169c349cdcaSJosh Poimboeuf 	struct klp_ops *ops;
170c349cdcaSJosh Poimboeuf 	int ret;
171c349cdcaSJosh Poimboeuf 
172c349cdcaSJosh Poimboeuf 	if (WARN_ON(!func->old_addr))
173c349cdcaSJosh Poimboeuf 		return -EINVAL;
174c349cdcaSJosh Poimboeuf 
175c349cdcaSJosh Poimboeuf 	if (WARN_ON(func->patched))
176c349cdcaSJosh Poimboeuf 		return -EINVAL;
177c349cdcaSJosh Poimboeuf 
178c349cdcaSJosh Poimboeuf 	ops = klp_find_ops(func->old_addr);
179c349cdcaSJosh Poimboeuf 	if (!ops) {
180c349cdcaSJosh Poimboeuf 		unsigned long ftrace_loc;
181c349cdcaSJosh Poimboeuf 
182c349cdcaSJosh Poimboeuf 		ftrace_loc = klp_get_ftrace_location(func->old_addr);
183c349cdcaSJosh Poimboeuf 		if (!ftrace_loc) {
184c349cdcaSJosh Poimboeuf 			pr_err("failed to find location for function '%s'\n",
185c349cdcaSJosh Poimboeuf 				func->old_name);
186c349cdcaSJosh Poimboeuf 			return -EINVAL;
187c349cdcaSJosh Poimboeuf 		}
188c349cdcaSJosh Poimboeuf 
189c349cdcaSJosh Poimboeuf 		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
190c349cdcaSJosh Poimboeuf 		if (!ops)
191c349cdcaSJosh Poimboeuf 			return -ENOMEM;
192c349cdcaSJosh Poimboeuf 
193c349cdcaSJosh Poimboeuf 		ops->fops.func = klp_ftrace_handler;
194c349cdcaSJosh Poimboeuf 		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
195c349cdcaSJosh Poimboeuf 				  FTRACE_OPS_FL_DYNAMIC |
196c349cdcaSJosh Poimboeuf 				  FTRACE_OPS_FL_IPMODIFY;
197c349cdcaSJosh Poimboeuf 
198c349cdcaSJosh Poimboeuf 		list_add(&ops->node, &klp_ops);
199c349cdcaSJosh Poimboeuf 
200c349cdcaSJosh Poimboeuf 		INIT_LIST_HEAD(&ops->func_stack);
201c349cdcaSJosh Poimboeuf 		list_add_rcu(&func->stack_node, &ops->func_stack);
202c349cdcaSJosh Poimboeuf 
203c349cdcaSJosh Poimboeuf 		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
204c349cdcaSJosh Poimboeuf 		if (ret) {
205c349cdcaSJosh Poimboeuf 			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
206c349cdcaSJosh Poimboeuf 			       func->old_name, ret);
207c349cdcaSJosh Poimboeuf 			goto err;
208c349cdcaSJosh Poimboeuf 		}
209c349cdcaSJosh Poimboeuf 
210c349cdcaSJosh Poimboeuf 		ret = register_ftrace_function(&ops->fops);
211c349cdcaSJosh Poimboeuf 		if (ret) {
212c349cdcaSJosh Poimboeuf 			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
213c349cdcaSJosh Poimboeuf 			       func->old_name, ret);
214c349cdcaSJosh Poimboeuf 			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
215c349cdcaSJosh Poimboeuf 			goto err;
216c349cdcaSJosh Poimboeuf 		}
217c349cdcaSJosh Poimboeuf 
218c349cdcaSJosh Poimboeuf 
219c349cdcaSJosh Poimboeuf 	} else {
220c349cdcaSJosh Poimboeuf 		list_add_rcu(&func->stack_node, &ops->func_stack);
221c349cdcaSJosh Poimboeuf 	}
222c349cdcaSJosh Poimboeuf 
223c349cdcaSJosh Poimboeuf 	func->patched = true;
224c349cdcaSJosh Poimboeuf 
225c349cdcaSJosh Poimboeuf 	return 0;
226c349cdcaSJosh Poimboeuf 
227c349cdcaSJosh Poimboeuf err:
228c349cdcaSJosh Poimboeuf 	list_del_rcu(&func->stack_node);
229c349cdcaSJosh Poimboeuf 	list_del(&ops->node);
230c349cdcaSJosh Poimboeuf 	kfree(ops);
231c349cdcaSJosh Poimboeuf 	return ret;
232c349cdcaSJosh Poimboeuf }
233c349cdcaSJosh Poimboeuf 
234c349cdcaSJosh Poimboeuf void klp_unpatch_object(struct klp_object *obj)
235c349cdcaSJosh Poimboeuf {
236c349cdcaSJosh Poimboeuf 	struct klp_func *func;
237c349cdcaSJosh Poimboeuf 
238c349cdcaSJosh Poimboeuf 	klp_for_each_func(obj, func)
239c349cdcaSJosh Poimboeuf 		if (func->patched)
240c349cdcaSJosh Poimboeuf 			klp_unpatch_func(func);
241c349cdcaSJosh Poimboeuf 
242c349cdcaSJosh Poimboeuf 	obj->patched = false;
243c349cdcaSJosh Poimboeuf }
244c349cdcaSJosh Poimboeuf 
245c349cdcaSJosh Poimboeuf int klp_patch_object(struct klp_object *obj)
246c349cdcaSJosh Poimboeuf {
247c349cdcaSJosh Poimboeuf 	struct klp_func *func;
248c349cdcaSJosh Poimboeuf 	int ret;
249c349cdcaSJosh Poimboeuf 
250c349cdcaSJosh Poimboeuf 	if (WARN_ON(obj->patched))
251c349cdcaSJosh Poimboeuf 		return -EINVAL;
252c349cdcaSJosh Poimboeuf 
253c349cdcaSJosh Poimboeuf 	klp_for_each_func(obj, func) {
254c349cdcaSJosh Poimboeuf 		ret = klp_patch_func(func);
255c349cdcaSJosh Poimboeuf 		if (ret) {
256c349cdcaSJosh Poimboeuf 			klp_unpatch_object(obj);
257c349cdcaSJosh Poimboeuf 			return ret;
258c349cdcaSJosh Poimboeuf 		}
259c349cdcaSJosh Poimboeuf 	}
260c349cdcaSJosh Poimboeuf 	obj->patched = true;
261c349cdcaSJosh Poimboeuf 
262c349cdcaSJosh Poimboeuf 	return 0;
263c349cdcaSJosh Poimboeuf }
264*d83a7cb3SJosh Poimboeuf 
265*d83a7cb3SJosh Poimboeuf void klp_unpatch_objects(struct klp_patch *patch)
266*d83a7cb3SJosh Poimboeuf {
267*d83a7cb3SJosh Poimboeuf 	struct klp_object *obj;
268*d83a7cb3SJosh Poimboeuf 
269*d83a7cb3SJosh Poimboeuf 	klp_for_each_object(patch, obj)
270*d83a7cb3SJosh Poimboeuf 		if (obj->patched)
271*d83a7cb3SJosh Poimboeuf 			klp_unpatch_object(obj);
272*d83a7cb3SJosh Poimboeuf }
273