//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

Optional<MCFixupKind> ARMAsmBackend::getFixupKind(StringRef Name) const {
  if (STI.getTargetTriple().isOSBinFormatELF() && Name == "R_ARM_NONE")
    return FK_NONE;

  return MCAsmBackend::getFixupKind(Name);
}

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                        Offset (bits) Size (bits) Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                        Offset (bits) Size (bits) Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 26, 6, 0},
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (Endian == support::little ? InfosLE
                                    : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op,
                                         const MCSubtargetInfo &STI) const {
  bool HasThumb2 = STI.getFeatureBits()[ARM::FeatureThumb2];
  bool HasV8MBaselineOps = STI.getFeatureBits()[ARM::HasV8MBaselineOps];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst,
                                      const MCSubtargetInfo &STI) const {
  if (getRelaxedOpcode(Inst.getOpcode(), STI) != Inst.getOpcode())
    return true;
  return false;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch ((unsigned)Fixup.getKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value doesn't fit in the signed 12-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value doesn't fit in the signed 9-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI,
                                     MCInst &Res) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode(), STI);

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    return;
  }

  // The rest of instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(raw_ostream &OS, uint64_t Count) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      support::endian::write(OS, nopEncoding, Endian);
    if (Count & 1)
      OS << '\0';
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    support::endian::write(OS, nopEncoding, Endian);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OS << '\0';
    break;
  case 2:
    OS.write("\0\0", 2);
    break;
  case 3:
    OS.write("\0\0\xa0", 3);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         const MCSubtargetInfo *STI) const {
  unsigned Kind = Fixup.getKind();

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
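  // For example, a .word or movw/movt pair referencing an external Thumb
  // function ends up with an odd addend, so an indirect branch (bx/blx)
  // through the materialized address stays in Thumb state.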
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        A->getSymbol().isExternal() &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1;
  }

  switch (Kind) {
  default:
    Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
    return 0;
  case FK_NONE:
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    assert(STI != nullptr);
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, Endian == support::little);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, Endian == support::little);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
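    // Calls marked as TLS descriptor calls are left with a zero immediate;
    // the relocation carries the target and the linker rewrites the call.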
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    if (!isInt<25>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    if (!isInt<21>(Value)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_bl: {
    if (!isInt<25>(Value - 4) ||
        (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
         !STI->getFeatureBits()[ARM::HasV8MBaselineOps] &&
         !STI->getFeatureBits()[ARM::HasV6MOps] &&
         !isInt<23>(Value - 4))) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, Endian == support::little);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
        !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    assert(STI != nullptr);
    if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
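    // E.g. an offset of 0xAB becomes 0xA0B: imm4H (0xA) in bits [11:8] and
    // imm4L (0xB) in bits [3:0], with the add/subtract bit placed at bit 23.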
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, Endian == support::little);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, Endian == support::little);
  }
  }
}

bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if (FixupKind == FK_NONE)
    return true;
  if (FixupKind == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
    return 0;

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_NONE:
    return 0;

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved,
                               const MCSubtargetInfo *STI) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx, STI);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (Endian == support::big) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = Endian == support::little ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fallback and unwind using dwarf.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  int CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    int Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = -Inst.getOffset();
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = -Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
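  // The adjust is recorded in the two UNWIND_ARM_FRAME_STACK_ADJUST_MASK bits
  // (23:22): an extra 4, 8 or 12 bytes maps to 0b01, 0b10 or 0b11 below.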
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
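  // The loop below only accepts saves of d8, d10, d12 and d14, each expected
  // 8 bytes below the previous save.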
  static unsigned FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  ARM::ArchKind AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::ArchKind::ARMV5T:
  case ARM::ArchKind::ARMV5TE:
  case ARM::ArchKind::ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::ArchKind::ARMV6:
  case ARM::ArchKind::ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::ArchKind::ARMV7A:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::ArchKind::ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::ArchKind::ARMV6M:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::ArchKind::ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::ArchKind::ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

static MCAsmBackend *createARMAsmBackend(const Target &T,
                                         const MCSubtargetInfo &STI,
                                         const MCRegisterInfo &MRI,
                                         const MCTargetOptions &Options,
                                         support::endianness Endian) {
  const Triple &TheTriple = STI.getTargetTriple();
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, STI, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, STI);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, STI, OSABI, Endian);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::little);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCSubtargetInfo &STI,
                                          const MCRegisterInfo &MRI,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, STI, MRI, Options, support::big);
}