//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks
      // 0 - 12, 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks
      // 0 - 12, 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (IsLittleEndian ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
  bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];
  bool HasV8MBaselineOps = STI->getFeatureBits()[ARM::HasV8MBaselineOps];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
    return true;
  return false;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch ((unsigned)Fixup.getKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is out of range for tB's signed 12-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI,
                                     MCInst &Res) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    return;
  }

  // The rest of the instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      OW->write16(nopEncoding);
    if (Count & 1)
      OW->write8(0);
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    OW->write32(nopEncoding);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OW->write8(0);
    break;
  case 2:
    OW->write16(0);
    break;
  case 3:
    OW->write16(0);
    OW->write8(0xa0);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                         bool IsPCRel, MCContext *Ctx,
                                         bool IsLittleEndian,
                                         bool IsResolved) const {
  unsigned Kind = Fixup.getKind();
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    if (!IsPCRel)
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    if (!IsPCRel)
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, IsLittleEndian);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
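    // Note that the ARM case above falls through to here after already
    // subtracting 4, so its total PC adjustment comes out to 8.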
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Ctx && Value >= 4096) {
      Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (Ctx && ARM_AM::getSOImmVal(Value) == -1) {
      Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, IsLittleEndian);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_bl: {
    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    // BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
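    // Split the adjusted offset into the S bit, the I1/I2 bits (stored as
    // J1/J2 after XORing with S) and the imm10/imm11 fields, then join the
    // two halfwords.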
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Ctx && Value % 4 != 0) {
      Ctx->reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if (Ctx && ((int64_t)Value < 2 || Value > 0x82 || Value & 1)) {
      Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
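    // Without Thumb2 or v8-M Baseline there is no wide encoding to relax to,
    // so an out-of-range value is reported as a hard error here.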
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] &&
        !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx->reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Ctx && Value >= 256) {
      Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Ctx && Value >= 256) {
      Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Ctx && (Value & 1)) {
      Ctx->reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Ctx && Value >= 256) {
      Ctx->reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Ctx && Value >> 12) {
      Ctx->reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  }
}

void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
                                      const MCAsmLayout &Layout,
                                      const MCFixup &Fixup,
                                      const MCFragment *DF,
                                      const MCValue &Target, uint64_t &Value,
                                      bool &IsResolved) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  // MachO (the only user of "Value") tries to make .o files that look vaguely
  // pre-linked, so for MOVW/MOVT and .word relocations they put the Thumb bit
  // into the addend if possible. Other relocation types don't want this bit
  // though (branches couldn't encode it if it *was* present, and no other
  // relocations exist) and it can interfere with checking valid expressions.
  if ((unsigned)Fixup.getKind() == FK_Data_4 ||
      (unsigned)Fixup.getKind() == ARM::fixup_arm_movw_lo16 ||
      (unsigned)Fixup.getKind() == ARM::fixup_arm_movt_hi16 ||
      (unsigned)Fixup.getKind() == ARM::fixup_t2_movw_lo16 ||
      (unsigned)Fixup.getKind() == ARM::fixup_t2_movt_hi16) {
    if (Sym) {
      if (Asm.isThumbFunc(Sym))
        Value |= 1;
    }
  }
  if (IsResolved && (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal() || Value >= 0x400004)
      IsResolved = false;
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
    IsResolved = false;

  // Try to get the encoded value for the fixup as-if we're mapping it into
  // the instruction. This allows adjustFixupValue() to issue a diagnostic
  // if the value is invalid.
  (void)adjustFixupValue(Fixup, Value, false, &Asm.getContext(),
                         IsLittleEndian, IsResolved);
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
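/// Note that several ARM fixups touch only the low three bytes of a 4-byte
/// instruction word; applyFixup() relies on getFixupKindContainerSizeBytes()
/// to index those bytes from the correct end on big-endian targets.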
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                               unsigned DataSize, uint64_t Value,
                               bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  Value =
      adjustFixupValue(Fixup, Value, IsPCRel, nullptr, IsLittleEndian, true);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // Used to point to big endian bytes.
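  // In big-endian mode the least significant byte of the fixup value lands in
  // the last byte of the containing instruction, so we need the container's
  // full size to compute each byte index below.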
  unsigned FullSizeBytes;
  if (!IsLittleEndian) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= DataSize && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = IsLittleEndian ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using DWARF.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  int CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
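  // Walk the CFI directives, tracking the CFA register/offset and the save
  // slot of each callee-saved register; anything that cannot be expressed
  // compactly falls back to DWARF unwind info.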
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    int Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = -Inst.getOffset();
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = -Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
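  // Saved GPRs must occupy consecutive 4-byte slots directly below the saved
  // r7, in exactly this order; registers that were not pushed are skipped.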
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be
  // trivial, but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
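  // Only this fixed d-register pattern is supported: each recorded save must
  // match the expected register and sit exactly 8 bytes below the previous
  // slot, otherwise we fall back to DWARF.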
  static unsigned FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  unsigned AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::AK_ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::AK_ARMV5T:
  case ARM::AK_ARMV5TE:
  case ARM::AK_ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::AK_ARMV6:
  case ARM::AK_ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::AK_ARMV7A:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::AK_ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::AK_ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::AK_ARMV6M:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::AK_ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::AK_ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
                                        const MCRegisterInfo &MRI,
                                        const Triple &TheTriple, StringRef CPU,
                                        const MCTargetOptions &Options,
                                        bool isLittle) {
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, TheTriple, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, TheTriple);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, TheTriple, OSABI, isLittle);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, true);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, false);
}

MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU,
                                            const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, true);
}

MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU,
                                            const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, false);
}