// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <[email protected]>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/random.h>
#include <linux/scs.h>
#include <linux/vmalloc.h>

#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/scs.h>
#include <asm/sections.h>

static u64 __ro_after_init module_alloc_base = (u64)_etext - MODULES_VSIZE;

#ifdef CONFIG_RANDOMIZE_BASE
static int __init kaslr_module_init(void)
{
	u64 module_range;
	u32 seed;

	if (!kaslr_enabled())
		return 0;

	seed = get_random_u32();

	if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
		/*
		 * Randomize the module region over a 2 GB window covering the
		 * kernel. This reduces the risk of modules leaking information
		 * about the address of the kernel itself, but results in
		 * branches between modules and the core kernel that are
		 * resolved via PLTs. (Branches between modules will be
		 * resolved normally.)
		 */
		module_range = SZ_2G - (u64)(_end - _stext);
		module_alloc_base = max((u64)_end - SZ_2G, (u64)MODULES_VADDR);
	} else {
		/*
		 * Randomize the module region by setting module_alloc_base to
		 * a PAGE_SIZE multiple in the range [_etext - MODULES_VSIZE,
		 * _stext). This guarantees that the resulting region still
		 * covers [_stext, _etext], and that all relative branches can
		 * be resolved without veneers unless this region is exhausted
		 * and we fall back to a larger 2GB window in module_alloc()
		 * when ARM64_MODULE_PLTS is enabled.
		 */
		module_range = MODULES_VSIZE - (u64)(_etext - _stext);
	}

	/* use the lower 21 bits to randomize the base of the module region */
	module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21;
	module_alloc_base &= PAGE_MASK;

	return 0;
}
subsys_initcall(kaslr_module_init)
#endif

void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	void *p;

	/*
	 * Where possible, prefer to allocate within direct branch range of the
	 * kernel such that no PLTs are necessary. This may fail, so we pass
	 * __GFP_NOWARN to silence the resulting warning.
	 */
	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, GFP_KERNEL | __GFP_NOWARN,
				 PAGE_KERNEL, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));

	if (!p) {
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));
	}

	if (p && (kasan_alloc_module_shadow(p, size, GFP_KERNEL) < 0)) {
		vfree(p);
		return NULL;
	}

	/* Memory is intended to be executable, reset the pointer tag. */
	return kasan_reset_tag(p);
}

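/*
 * Relocation operations in the terms of the AArch64 ELF specification: ABS
 * computes S + A, PREL computes S + A - P and PAGE computes
 * Page(S + A) - Page(P), where Page(x) masks off the low 12 bits. The caller
 * passes S + A in @val and the place P in @place.
 */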
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

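/*
 * Patch a generic instruction immediate: shift the relocated value right by
 * @lsb, insert the low @len bits into the instruction via
 * aarch64_insn_encode_immediate(), and return -ERANGE unless the discarded
 * upper bits are a plain sign extension of the encoded field.
 */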
static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

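/*
 * Arch hook invoked by the core module loader for each SHT_RELA section of a
 * module: walk the relocation entries and patch the places they refer to.
 */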
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
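		/*
		 * The lsb/len pairs below reflect how each instruction scales
		 * and sizes its immediate field, e.g. branch offsets are in
		 * units of 4 bytes and LDST offsets in units of the access
		 * size.
		 */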
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);
			if (ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;

	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	if (scs_is_dynamic()) {
		s = find_section(hdr, sechdrs, ".init.eh_frame");
		if (s)
			scs_patch((void *)s->sh_addr, s->sh_size);
	}

	return module_init_ftrace_plt(hdr, sechdrs, me);
}