// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		u64 val = ppc_inst_as_ulong(instr);

		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	return -EFAULT;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return __patch_instruction(addr, instr, addr);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
			cpu);
		return -1;
	}

	// Map/unmap the area to ensure all page tables are pre-allocated
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(text_poke_area, area);

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(text_poke_area));
	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

/*
 * Although BUG_ON() is rude, in this case it should only happen if ENOMEM, and
 * we judge it as being preferable to a kernel that will crash later when
 * someone tries to use patch_instruction().
 */
void __init poking_init(void)
{
	BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
		"powerpc/text_poke:online", text_area_cpu_up,
		text_area_cpu_down));
	static_branch_enable(&poking_init_done);
}
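
/*
 * Summary of the CONFIG_STRICT_KERNEL_RWX flow implemented by the helpers
 * that follow: kernel text is mapped read-only, so the page containing the
 * target instruction is temporarily mapped writable at this CPU's
 * text_poke_area address by map_patch_area(), __patch_instruction() writes
 * through that alias while flushing the icache for the real executable
 * address, and unmap_patch_area() removes the mapping again.
 * do_patch_instruction() runs the whole sequence with interrupts disabled so
 * the per-cpu mapping cannot change underneath us.
 */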

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
		pfn = vmalloc_to_pfn(addr);
	else
		pfn = __pa_symbol(addr) >> PAGE_SHIFT;

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * With the hash MMU, pte_clear() flushes the TLB; with radix, we have
	 * to do it ourselves.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}

static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;

	text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	err = map_patch_area(addr, text_poke_addr);
	if (err)
		return err;

	err = __patch_instruction(addr, instr, patch_addr);

	unmap_patch_area(text_poke_addr);

	return err;
}

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	unsigned long flags;

	/*
	 * Very early in boot patch_instruction() is called before
	 * text_poke_area is ready, but we still need to allow patching.
	 * We just do the plain old patching in that case.
	 */
	if (!static_branch_likely(&poking_init_done))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);
	err = __do_patch_instruction(addr, instr);
	local_irq_restore(flags);

	return err;
}
#else /* !CONFIG_STRICT_KERNEL_RWX */

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return raw_patch_instruction(addr, instr);
}

#endif /* CONFIG_STRICT_KERNEL_RWX */

__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	/* Make sure we aren't patching a freed init section */
	if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
		return 0;

	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}
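
/*
 * Illustrative sketch only, not used anywhere in the kernel: how a caller
 * along the lines of ftrace or jump label code might use patch_instruction()
 * and patch_branch() to disable a call site with a nop and re-enable it with
 * a relative branch-and-link.  'example_toggle_site', 'site' and 'handler'
 * are made-up names, and PPC_RAW_NOP() is assumed to be visible here via
 * <asm/ppc-opcode.h>.
 */
static int __maybe_unused example_toggle_site(u32 *site, unsigned long handler,
					      bool enable)
{
	if (!enable)
		/* Overwrite the call site with a no-op */
		return patch_instruction(site, ppc_inst(PPC_RAW_NOP()));

	/* Install a relative branch-and-link to 'handler' at the site */
	return patch_branch(site, handler, BRANCH_SET_LINK);
}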

bool is_offset_in_branch_range(long offset)
{
	/*
	 * Powerpc branch instruction is:
	 *
	 *  0         6                 30   31
	 *  +---------+----------------+---+---+
	 *  | opcode  |     LI         |AA |LK |
	 *  +---------+----------------+---+---+
	 *  Where AA = 0 and LK = 0
	 *
	 *  LI is a signed 24 bits integer. The real branch offset is computed
	 *  by: imm32 = SignExtend(LI:'0b00', 32);
	 *
	 *  So the maximum forward branch should be:
	 *    (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
	 *  The maximum backward branch should be:
	 *    (0xff800000 << 2) = 0xfe000000 = -0x2000000
	 */
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

bool is_offset_in_cond_branch_range(long offset)
{
	return offset >= -0x8000 && offset <= 0x7fff && !(offset & 0x3);
}

/*
 * Helper to check if a given instruction is a conditional branch.
 * Derived from the conditional checks in analyse_instr().
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);

int create_branch(ppc_inst_t *instr, const u32 *addr,
		  unsigned long target, int flags)
{
	long offset;

	*instr = ppc_inst(0);
	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x48000000 | (flags & 0x3) | (offset & 0x03FFFFFC));

	return 0;
}
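
/*
 * Worked example of the encoding above (illustrative, made-up addresses): a
 * relative, no-link branch from 0xc000000000000000 to 0xc000000000000100 has
 * offset 0x100, so create_branch() produces 0x48000000 | 0x100 = 0x48000100,
 * i.e. an unconditional branch 0x100 bytes forward.  An offset outside
 * [-0x2000000, 0x1fffffc] makes create_branch() return 1, which a caller such
 * as patch_branch() above turns into -ERANGE.
 */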

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}

int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}

int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;

	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}
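
/*
 * Illustrative sketch only (hypothetical helper, not part of this file's
 * API): relocating a branch while copying code, the way a kprobe-style
 * out-of-line copy might.  translate_branch() recomputes the branch so it
 * still reaches the original target from its new location.
 */
static int __maybe_unused example_relocate_branch(u32 *dest, const u32 *src)
{
	ppc_inst_t instr;

	/* Rebuild the branch at 'src' so that it still works when placed at 'dest' */
	if (translate_branch(&instr, dest, src))
		return -ERANGE;

	return patch_instruction(dest, instr);
}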