/*
 * livepatch.h - Kernel Live Patching Core
 *
 * Copyright (C) 2014 Seth Jennings <[email protected]>
 * Copyright (C) 2014 SUSE
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _LINUX_LIVEPATCH_H_
#define _LINUX_LIVEPATCH_H_

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/completion.h>
#include <linux/list.h>

#if IS_ENABLED(CONFIG_LIVEPATCH)

#include <asm/livepatch.h>

/* task patch states */
#define KLP_UNDEFINED	-1
#define KLP_UNPATCHED	 0
#define KLP_PATCHED	 1

/**
 * struct klp_func - function structure for live patching
 * @old_name:	name of the function to be patched
 * @new_func:	pointer to the patched function code
 * @old_sympos:	a hint indicating at which symbol position the old function
 *		can be found (optional)
 * @old_func:	pointer to the function being patched
 * @kobj:	kobject for sysfs resources
 * @node:	list node for klp_object func_list
 * @stack_node:	list node for klp_ops func_stack list
 * @old_size:	size of the old function
 * @new_size:	size of the new function
 * @nop:	temporary patch to use the original code again; dyn. allocated
 * @patched:	the func has been added to the klp_ops list
 * @transition:	the func is currently being applied or reverted
 *
 * The patched and transition variables define the func's patching state. When
 * patching, a func is always in one of the following states:
 *
 *   patched=0 transition=0: unpatched
 *   patched=0 transition=1: unpatched, temporary starting state
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=1 transition=0: patched, visible to all tasks
 *
 * And when unpatching, it goes in the reverse order:
 *
 *   patched=1 transition=0: patched, visible to all tasks
 *   patched=1 transition=1: patched, may be visible to some tasks
 *   patched=0 transition=1: unpatched, temporary ending state
 *   patched=0 transition=0: unpatched
 */
struct klp_func {
	/* external */
	const char *old_name;
	void *new_func;
	/*
	 * The old_sympos field is optional and can be used to resolve
	 * duplicate symbol names in livepatch objects. If this field is zero,
	 * the symbol is expected to be unique, otherwise patching fails. If
	 * this value is greater than zero then that occurrence of the symbol
	 * in kallsyms for the given object is used.
	 */
	unsigned long old_sympos;

	/* internal */
	void *old_func;
	struct kobject kobj;
	struct list_head node;
	struct list_head stack_node;
	unsigned long old_size, new_size;
	bool nop;
	bool patched;
	bool transition;
};

struct klp_object;

/**
 * struct klp_callbacks - pre/post live-(un)patch callback structure
 * @pre_patch:	executed before code patching
 * @post_patch:	executed after code patching
 * @pre_unpatch:	executed before code unpatching
 * @post_unpatch:	executed after code unpatching
 * @post_unpatch_enabled:	flag indicating if post-unpatch callback
 *				should run
 *
 * All callbacks are optional. Only the pre-patch callback, if provided,
 * will be unconditionally executed. If the parent klp_object fails to
 * patch for any reason, including a non-zero error status returned from
 * the pre-patch callback, no further callbacks will be executed.
 */
struct klp_callbacks {
	int (*pre_patch)(struct klp_object *obj);
	void (*post_patch)(struct klp_object *obj);
	void (*pre_unpatch)(struct klp_object *obj);
	void (*post_unpatch)(struct klp_object *obj);
	bool post_unpatch_enabled;
};

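/*
 * Illustrative sketch, not part of this header: a livepatch module that
 * needs setup/teardown around (un)patching could provide callbacks such
 * as the hypothetical demo_pre_patch()/demo_post_unpatch() below and hook
 * them up through the .callbacks member of its struct klp_object entries
 * (defined further down in this file).
 *
 *	static int demo_pre_patch(struct klp_object *obj)
 *	{
 *		// Prepare whatever the new functions rely on. A non-zero
 *		// return value makes patching of this object fail, and no
 *		// further callbacks are run for it.
 *		return 0;
 *	}
 *
 *	static void demo_post_unpatch(struct klp_object *obj)
 *	{
 *		// Undo whatever demo_pre_patch() set up.
 *	}
 *
 *	.callbacks = {
 *		.pre_patch	= demo_pre_patch,
 *		.post_unpatch	= demo_post_unpatch,
 *	},
 */
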
/**
 * struct klp_object - kernel object structure for live patching
 * @name:	module name (or NULL for vmlinux)
 * @funcs:	function entries for functions to be patched in the object
 * @callbacks:	functions to be executed pre/post (un)patching
 * @kobj:	kobject for sysfs resources
 * @func_list:	dynamic list of the function entries
 * @node:	list node for klp_patch obj_list
 * @mod:	kernel module associated with the patched object
 *		(NULL for vmlinux)
 * @dynamic:	temporary object for nop functions; dynamically allocated
 * @patched:	the object's funcs have been added to the klp_ops list
 */
struct klp_object {
	/* external */
	const char *name;
	struct klp_func *funcs;
	struct klp_callbacks callbacks;

	/* internal */
	struct kobject kobj;
	struct list_head func_list;
	struct list_head node;
	struct module *mod;
	bool dynamic;
	bool patched;
};

/**
 * struct klp_patch - patch structure for live patching
 * @mod:	reference to the live patch module
 * @objs:	object entries for kernel objects to be patched
 * @replace:	replace all actively used patches
 * @list:	list node for global list of actively used patches
 * @kobj:	kobject for sysfs resources
 * @obj_list:	dynamic list of the object entries
 * @enabled:	the patch is enabled (but operation may be incomplete)
 * @forced:	was involved in a forced transition
 * @free_work:	patch cleanup from workqueue-context
 * @finish:	for waiting till it is safe to remove the patch module
 */
struct klp_patch {
	/* external */
	struct module *mod;
	struct klp_object *objs;
	bool replace;

	/* internal */
	struct list_head list;
	struct kobject kobj;
	struct list_head obj_list;
	bool enabled;
	bool forced;
	struct work_struct free_work;
	struct completion finish;
};

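/*
 * Illustrative sketch, not part of this header: the usual consumer of the
 * structures above is a patch module that declares static klp_func,
 * klp_object and klp_patch arrays and enables the patch from its init
 * function, along the lines of samples/livepatch/livepatch-sample.c.
 * All "demo_*" and "livepatch_*" names below are hypothetical.
 *
 *	static struct klp_func demo_funcs[] = {
 *		{
 *			.old_name = "demo_function",
 *			.new_func = livepatch_demo_function,
 *			// .old_sympos is only needed when "demo_function"
 *			// occurs more than once in the object's kallsyms
 *		}, { }
 *	};
 *
 *	static struct klp_object demo_objs[] = {
 *		{
 *			// name == NULL means the function lives in vmlinux
 *			.funcs = demo_funcs,
 *		}, { }
 *	};
 *
 *	static struct klp_patch demo_patch = {
 *		.mod = THIS_MODULE,
 *		.objs = demo_objs,
 *	};
 *
 *	static int __init demo_init(void)
 *	{
 *		return klp_enable_patch(&demo_patch);
 *	}
 */
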
#define klp_for_each_object_static(patch, obj) \
	for (obj = patch->objs; obj->funcs || obj->name; obj++)

#define klp_for_each_object_safe(patch, obj, tmp_obj) \
	list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node)

#define klp_for_each_object(patch, obj) \
	list_for_each_entry(obj, &patch->obj_list, node)

#define klp_for_each_func_static(obj, func) \
	for (func = obj->funcs; \
	     func->old_name || func->new_func || func->old_sympos; \
	     func++)

#define klp_for_each_func_safe(obj, func, tmp_func) \
	list_for_each_entry_safe(func, tmp_func, &obj->func_list, node)

#define klp_for_each_func(obj, func) \
	list_for_each_entry(func, &obj->func_list, node)

int klp_enable_patch(struct klp_patch *);

void arch_klp_init_object_loaded(struct klp_patch *patch,
				 struct klp_object *obj);

/* Called from the module loader during module coming/going states */
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);

void klp_copy_process(struct task_struct *child);
void klp_update_patch_state(struct task_struct *task);

static inline bool klp_patch_pending(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
}

static inline bool klp_have_reliable_stack(void)
{
	return IS_ENABLED(CONFIG_STACKTRACE) &&
	       IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
}

typedef int (*klp_shadow_ctor_t)(void *obj,
				 void *shadow_data,
				 void *ctor_data);
typedef void (*klp_shadow_dtor_t)(void *obj, void *shadow_data);

void *klp_shadow_get(void *obj, unsigned long id);
void *klp_shadow_alloc(void *obj, unsigned long id,
		       size_t size, gfp_t gfp_flags,
		       klp_shadow_ctor_t ctor, void *ctor_data);
void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
			      size_t size, gfp_t gfp_flags,
			      klp_shadow_ctor_t ctor, void *ctor_data);
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);

#else /* !CONFIG_LIVEPATCH */

static inline int klp_module_coming(struct module *mod) { return 0; }
static inline void klp_module_going(struct module *mod) {}
static inline bool klp_patch_pending(struct task_struct *task) { return false; }
static inline void klp_update_patch_state(struct task_struct *task) {}
static inline void klp_copy_process(struct task_struct *child) {}

#endif /* CONFIG_LIVEPATCH */

#endif /* _LINUX_LIVEPATCH_H_ */
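
/*
 * Illustrative sketch, not part of this header: the klp_shadow_*()
 * declarations above attach extra "shadow" data to existing objects,
 * keyed by <obj, id>. A patch module might use them roughly as follows
 * (compare samples/livepatch/livepatch-shadow-fix1.c); the names
 * lock_init_ctor and DEMO_SHADOW_ID are hypothetical.
 *
 *	static int lock_init_ctor(void *obj, void *shadow_data, void *ctor_data)
 *	{
 *		spin_lock_init((spinlock_t *)shadow_data);
 *		return 0;
 *	}
 *
 *	// attach (or look up) a spinlock shadowing an existing object
 *	spinlock_t *lock = klp_shadow_get_or_alloc(obj, DEMO_SHADOW_ID,
 *						    sizeof(*lock), GFP_KERNEL,
 *						    lock_init_ctor, NULL);
 *
 *	// ...later, detach and free it again (no destructor needed here)
 *	klp_shadow_free(obj, DEMO_SHADOW_ID, NULL);
 */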