/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <[email protected]>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"

static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}

static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}

/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif

static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}

static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}

void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}

int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}
	obj->patched = true;

	return 0;
}