//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
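      //
      // The Offset/Size columns describe which bits of the 32-bit container
      // the fixup value occupies. FKF_IsAlignedDownTo32Bits marks Thumb fixups
      // whose effective PC base is the instruction address aligned down to a
      // word boundary (the Thumb "Align(PC, 4)" rule).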
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
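      //
      // This big-endian table mirrors InfosLE; only the bit offsets differ,
      // and only for fixups that occupy part of their 32-bit container.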
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (IsLittleEndian ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}
} // end anonymous namespace

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
  bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasThumb2 ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
    return true;
  return false;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch ((unsigned)Fixup.getKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero.
    // There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i12.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing a Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    return;
  }

  // The rest of the instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      OW->write16(nopEncoding);
    if (Count & 1)
      OW->write8(0);
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    OW->write32(nopEncoding);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OW->write8(0);
    break;
  case 2:
    OW->write16(0);
    break;
  case 3:
    OW->write16(0);
    OW->write8(0xa0);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                         bool IsPCRel, MCContext *Ctx,
                                         bool IsLittleEndian,
                                         bool IsResolved) const {
  unsigned Kind = Fixup.getKind();
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    if (!IsPCRel)
      Value >>= 16;
  // Fallthrough
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    if (!IsPCRel)
      Value >>= 16;
  // Fallthrough
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, IsLittleEndian);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
  // FALLTHROUGH
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Ctx && Value >= 4096)
      Ctx->reportFatalError(Fixup.getLoc(),
                            "out of range pc-relative fixup value");
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
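    // At this point bits [11:0] hold the magnitude of the offset and bit 23
    // is the U (add/subtract) bit, matching the LDR (literal) encodings.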
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (Ctx && ARM_AM::getSOImmVal(Value) == -1)
      Ctx->reportFatalError(Fixup.getLoc(),
                            "out of range pc-relative fixup value");
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, IsLittleEndian);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7;  // S bit
    out |= (Value & 0x40000) >> 7;  // J2 bit
    out |= (Value & 0x20000) >> 4;  // J1 bit
    out |= (Value & 0x1F800) << 5;  // imm6 field
    out |= (Value & 0x007FF);       // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_bl: {
    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
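    //
    // For example (illustrative values only), Value = 0x1000 gives
    // offset = 0x7FE, S = I1 = I2 = 0, J1 = J2 = 1, imm10 = 0, imm11 = 0x7FE,
    // i.e. FirstHalf = 0x0000 and SecondHalf = 0x2FFE below.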
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 2) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise
    // we could have an error on our hands.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic)
        Ctx->reportFatalError(Fixup.getLoc(), FixupDiagnostic);
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic)
        Ctx->reportFatalError(Fixup.getLoc(), FixupDiagnostic);
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic)
        Ctx->reportFatalError(Fixup.getLoc(), FixupDiagnostic);
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Ctx && Value >= 256)
      Ctx->reportFatalError(Fixup.getLoc(),
                            "out of range pc-relative fixup value");
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
  // Fall through.
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Ctx && Value >= 256)
      Ctx->reportFatalError(Fixup.getLoc(),
                            "out of range pc-relative fixup value");
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  }
}

void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
                                      const MCAsmLayout &Layout,
                                      const MCFixup &Fixup,
                                      const MCFragment *DF,
                                      const MCValue &Target, uint64_t &Value,
                                      bool &IsResolved) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  // Some fixups to thumb function symbols need the low bit (thumb bit)
  // twiddled.
  if ((unsigned)Fixup.getKind() != ARM::fixup_arm_ldst_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_t2_ldst_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_arm_adr_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_thumb_adr_pcrel_10 &&
      (unsigned)Fixup.getKind() != ARM::fixup_t2_adr_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_arm_thumb_cp) {
    if (Sym) {
      if (Asm.isThumbFunc(Sym))
        Value |= 1;
    }
  }
  if (IsResolved && (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal() || Value >= 0x400004)
      IsResolved = false;
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
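  // Clearing IsResolved here makes the MC layer emit a relocation for the
  // fixup instead of folding the final value into the instruction.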
  if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
    IsResolved = false;

  // Try to get the encoded value for the fixup as if we're mapping it into
  // the instruction. This allows adjustFixupValue() to issue a diagnostic
  // if the value is invalid.
  (void)adjustFixupValue(Fixup, Value, false, &Asm.getContext(),
                         IsLittleEndian, IsResolved);
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                               unsigned DataSize, uint64_t Value,
                               bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  Value =
      adjustFixupValue(Fixup, Value, IsPCRel, nullptr, IsLittleEndian, true);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (!IsLittleEndian) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= DataSize && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = IsLittleEndian ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using dwarf.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  int CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
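  // The encoding built below packs the unwind mode into bits 27:24, the
  // vararg stack adjust into bits 23:22, the GPR push mask into the low byte
  // and (for the FRAME_D mode) the D-register count into bits 11:8; see the
  // CU::CompactUnwindEncodings values above.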
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    int Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = -Inst.getOffset();
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = -Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
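  // GPRCSRegs lists the callee-saved GPRs in the order the standard frame
  // pushes them (descending stack addresses starting right below r7); the
  // loop below checks that each saved register sits exactly in that slot.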
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be
  // trivial, but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer
  // to DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
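  // The compact encoding records only a count of saved D-registers, so the
  // loop below checks that D8/D10/D12/D14 are saved at the expected
  // contiguous slots (8 bytes apart) below the GPR save area.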
  static unsigned FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  unsigned AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::AK_ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::AK_ARMV6:
  case ARM::AK_ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::AK_ARMV5:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::AK_ARMV5T:
  case ARM::AK_ARMV5E:
  case ARM::AK_ARMV5TE:
  case ARM::AK_ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5TEJ;
  case ARM::AK_ARMV7:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::AK_ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::AK_ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::AK_ARMV6M:
  case ARM::AK_ARMV6SM:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::AK_ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::AK_ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
                                        const MCRegisterInfo &MRI,
                                        const Triple &TheTriple, StringRef CPU,
                                        bool isLittle) {
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, TheTriple, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, TheTriple);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, TheTriple, OSABI, isLittle);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, true);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, false);
}

MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, true);
}

MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, false);
}