// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		u64 val = ppc_inst_as_ulong(instr);

		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	return -EPERM;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return __patch_instruction(addr, instr, addr);
}

struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}
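/*
 * Two patching back ends are implemented below.  When mm_patch_enabled()
 * returns true (SMP with the Radix MMU), a temporary mm is used so the
 * writable alias of the text page is visible only to the patching CPU.
 * Otherwise a per-cpu vmalloc area in init_mm is mapped and unmapped
 * around each write.
 */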
/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	WARN_ON(!mm_is_thread_local(temp_mm));

	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}
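/*
 * Condensed sketch of the expected calling sequence around the temp mm
 * (see __do_patch_instruction_mm() below for the full version):
 *
 *	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
 *	asm volatile("ptesync" ::: "memory");	// hwsync: order the PTE update
 *	isync();				// CSI before the context switch
 *	orig_mm = start_using_temp_mm(patching_mm);
 *	... write via the temporary mapping ...
 *	stop_using_temp_mm(patching_mm, orig_mm);
 *	pte_clear(patching_mm, text_poke_addr, pte);
 *	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
 */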
static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
			  cpu);
		return -1;
	}

	// Map/unmap the area to ensure all page tables are pre-allocated
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(cpu_patching_context.area, area);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(cpu_patching_context.area));
	this_cpu_write(cpu_patching_context.area, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);
	return 0;
}

static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
	mmput(mm);
}

static int text_area_cpu_up_mm(unsigned int cpu)
{
	struct mm_struct *mm;
	unsigned long addr;
	pte_t *pte;
	spinlock_t *ptl;

	mm = mm_alloc();
	if (WARN_ON(!mm))
		goto fail_no_mm;

	/*
	 * Choose a random page-aligned address from the interval
	 * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
	 * The lower address bound is PAGE_SIZE to avoid the zero-page.
	 */
	addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

	/*
	 * PTE allocation uses GFP_KERNEL which means we need to
	 * pre-allocate the PTE here because we cannot do the
	 * allocation during patching when IRQs are disabled.
	 *
	 * We use get_locked_pte() to avoid open coding; the lock
	 * itself is unnecessary.
	 */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto fail_no_pte;
	pte_unmap_unlock(pte, ptl);

	this_cpu_write(cpu_patching_context.mm, mm);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, pte);

	return 0;

fail_no_pte:
	put_patching_mm(mm, addr);
fail_no_mm:
	return -ENOMEM;
}

static int text_area_cpu_down_mm(unsigned int cpu)
{
	put_patching_mm(this_cpu_read(cpu_patching_context.mm),
			this_cpu_read(cpu_patching_context.addr));

	this_cpu_write(cpu_patching_context.mm, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);

	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

void __init poking_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		return;

	if (mm_patch_enabled())
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke_mm:online",
					text_area_cpu_up_mm,
					text_area_cpu_down_mm);
	else
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up,
					text_area_cpu_down);

	/* cpuhp_setup_state returns >= 0 on success */
	if (WARN_ON(ret < 0))
		return;

	static_branch_enable(&poking_init_done);
}
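/*
 * Illustrative use of the entry points defined further down (a sketch only;
 * 'ip' and 'target' are hypothetical caller-supplied addresses, and
 * PPC_RAW_NOP() comes from <asm/ppc-opcode.h>):
 *
 *	patch_instruction(ip, ppc_inst(PPC_RAW_NOP()));
 *	patch_branch(ip, target, BRANCH_SET_LINK);
 */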
static unsigned long get_patch_pfn(void *addr)
{
	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
		return vmalloc_to_pfn(addr);
	else
		return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn = get_patch_pfn(addr);

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear flushes the TLB; in radix, we have to do it
	 * explicitly.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}

static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	pte = __this_cpu_read(cpu_patching_context.pte);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instruction(addr, instr, patch_addr);

	/* hwsync performed by __patch_instruction (sync) if successful */
	if (err)
		mb(); /* sync */

	/* context synchronisation performed by __patch_instruction (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	return err;
}

static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_instruction(addr, instr, patch_addr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	unsigned long flags;

	/*
	 * During very early boot patch_instruction is called
	 * when text_poke_area is not ready, but we still need
	 * to allow patching. We just do the plain old patching.
	 */
	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
	    !static_branch_likely(&poking_init_done))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_instruction_mm(addr, instr);
	else
		err = __do_patch_instruction(addr, instr);
	local_irq_restore(flags);

	return err;
}

__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	/* Make sure we aren't patching a freed init section */
	if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
		return 0;

	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}

/*
 * Helper to check if a given instruction is a conditional branch.
 * Derived from the conditional checks in analyse_instr().
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (! (flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}
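/*
 * Worked example (illustrative): a "beq" to an address 8 bytes ahead is
 * primary opcode 16 with BO=12, BI=2 and a branch offset of 8, i.e.
 *
 *	0x40000000 | (12 << 21) | (2 << 16) | 0x0008 == 0x41820008
 *
 * which is the word create_cond_branch() above builds when the BO/BI bits
 * are passed in 'flags' and 'target' is 'addr' + 8.
 */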
int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}

int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;

	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}
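/*
 * Illustrative use of translate_branch() (a sketch; 'src' and 'dest' are
 * hypothetical addresses supplied by a caller that relocates code): when an
 * instruction is copied from 'src' to 'dest', a relative branch must be
 * re-encoded so it still reaches its original target from the new location:
 *
 *	ppc_inst_t instr;
 *
 *	if (!translate_branch(&instr, dest, src))
 *		patch_instruction(dest, instr);
 */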