//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate, but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined
      // in ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate, but scattered into two chunks 0 - 12,
      // 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (IsLittleEndian ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}
} // end anonymous namespace

unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
  bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasThumb2 ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
    return true;
  return false;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch ((unsigned)Fixup.getKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero.
    // There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is out of range for the signed 12-bit displacement.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing a Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    Res.setOpcode(RelaxedOp);
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    return;
  }

  // The rest of the instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ?
        Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      OW->write16(nopEncoding);
    if (Count & 1)
      OW->write8(0);
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    OW->write32(nopEncoding);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OW->write8(0);
    break;
  case 2:
    OW->write16(0);
    break;
  case 3:
    OW->write16(0);
    OW->write8(0);
    break;
  }

  return true;
}

static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
                                         bool IsPCRel, MCContext *Ctx,
                                         bool IsLittleEndian,
                                         bool IsResolved) const {
  unsigned Kind = Fixup.getKind();
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    if (!IsPCRel)
      Value >>= 16;
    // Fallthrough
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    if (!IsPCRel)
      Value >>= 16;
    // Fallthrough
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, IsLittleEndian);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    // FALLTHROUGH
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Ctx && Value >= 4096)
      Ctx->reportFatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
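    //
    // Worked example (illustrative values, not taken from the original
    // source): for the ARM variant, the two -4 adjustments above subtract 8
    // in total, so a pc-relative Value of 12 becomes 4 and the returned bits
    // are 0x800004 (imm12 == 4 with the U/add bit 23 set). For the Thumb2
    // variant those 32 bits are additionally halfword-swapped just below.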
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (Ctx && ARM_AM::getSOImmVal(Value) == -1)
      Ctx->reportFatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, IsLittleEndian);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_bl: {
    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    // BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
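    //
    // Worked example (illustrative values, not taken from the original
    // source): for a forward branch with Value == 0x1000, offset becomes
    // (0x1000 - 4) >> 1 == 0x7FE, so signBit == 0, J1Bit == J2Bit == 1,
    // imm10Bits == 0 and imm11Bits == 0x7FE, giving FirstHalf == 0x0000 and
    // SecondHalf == 0x2FFE before the halves are joined below.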
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    // BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    uint32_t offset = (Value - 2) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_ARM_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic)
        Ctx->reportFatalError(Fixup.getLoc(), FixupDiagnostic);
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic)
        Ctx->reportFatalError(Fixup.getLoc(), FixupDiagnostic);
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (Ctx && !STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic)
        Ctx->reportFatalError(Fixup.getLoc(), FixupDiagnostic);
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Ctx && Value >= 256)
      Ctx->reportFatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    // Fall through.
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Ctx && Value >= 256)
      Ctx->reportFatalError(Fixup.getLoc(), "out of range pc-relative fixup value");
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  }
}

void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
                                      const MCAsmLayout &Layout,
                                      const MCFixup &Fixup,
                                      const MCFragment *DF,
                                      const MCValue &Target, uint64_t &Value,
                                      bool &IsResolved) {
  const MCSymbolRefExpr *A = Target.getSymA();
  // Some fixups to thumb function symbols need the low bit (thumb bit)
  // twiddled.
  if ((unsigned)Fixup.getKind() != ARM::fixup_arm_ldst_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_t2_ldst_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_arm_adr_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_thumb_adr_pcrel_10 &&
      (unsigned)Fixup.getKind() != ARM::fixup_t2_adr_pcrel_12 &&
      (unsigned)Fixup.getKind() != ARM::fixup_arm_thumb_cp) {
    if (A) {
      const MCSymbol &Sym = A->getSymbol();
      if (Asm.isThumbFunc(&Sym))
        Value |= 1;
    }
  }
  // For a Thumb1 BL instruction, the target may be a long jump between basic
  // blocks of the same function. Thus, we only resolve the offset when the
  // destination is in the same MCFragment.
  if (A && (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
    const MCSymbol &Sym = A->getSymbol();
    IsResolved = (Sym.getFragment() == DF);
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_blx ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_blx ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_uncondbl ||
            (unsigned)Fixup.getKind() == ARM::fixup_arm_condbl))
    IsResolved = false;

  // Try to get the encoded value for the fixup as-if we're mapping it into
  // the instruction.
  // This allows adjustFixupValue() to issue a diagnostic
  // if the value is invalid.
  (void)adjustFixupValue(Fixup, Value, false, &Asm.getContext(),
                         IsLittleEndian, IsResolved);
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the
/// container involved in big endian.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
                               unsigned DataSize, uint64_t Value,
                               bool IsPCRel) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  Value =
      adjustFixupValue(Fixup, Value, IsPCRel, nullptr, IsLittleEndian, true);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");

  // Used to point to big endian bytes.
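  // For example (illustrative, not from the original source): a 3-byte ARM
  // fixup inside a 4-byte big-endian container makes the loop below write
  // fixup byte 0 at Data[Offset + 3], byte 1 at Data[Offset + 2] and byte 2
  // at Data[Offset + 1], mirroring the little-endian layout.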
  unsigned FullSizeBytes;
  if (!IsLittleEndian) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= DataSize && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits
  // from the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = IsLittleEndian ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK = 0x0F000000,
  UNWIND_ARM_MODE_FRAME = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D = 0x02000000,
  UNWIND_ARM_MODE_DWARF = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4 = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5 = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6 = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8 = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9 = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10 = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11 = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12 = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using DWARF.
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  int CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved.
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
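  //
  // Illustrative sketch (assumed directives, not from the original source):
  // a standard "push {r7, lr}; mov r7, sp" prologue typically emits
  //   .cfi_def_cfa r7, 8
  //   .cfi_offset lr, -4
  //   .cfi_offset r7, -8
  // which the code below turns into UNWIND_ARM_MODE_FRAME with a zero stack
  // adjust and no extra register bits.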
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    int Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = -Inst.getOffset();
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = -Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
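  // For example (illustrative, not from the original source): with a
  // "push {r4, r5, r6, r7, lr}" prologue and StackAdjust == 0, the loop
  // below expects r6 at -12, r5 at -16 and r4 at -20, and sets the three
  // UNWIND_ARM_FRAME_FIRST_PUSH_R* bits.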
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be
  // trivial, but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer
  // to DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static unsigned FPRCSRegs[] = {ARM::D8, ARM::D10, ARM::D12, ARM::D14};
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  unsigned AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::AK_ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::AK_ARMV6:
  case ARM::AK_ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::AK_ARMV5:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::AK_ARMV5T:
  case ARM::AK_ARMV5E:
  case ARM::AK_ARMV5TE:
  case ARM::AK_ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5TEJ;
  case ARM::AK_ARMV7:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::AK_ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::AK_ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::AK_ARMV6M:
  case ARM::AK_ARMV6SM:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::AK_ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::AK_ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
                                        const MCRegisterInfo &MRI,
                                        const Triple &TheTriple, StringRef CPU,
                                        bool isLittle) {
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, TheTriple, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, TheTriple);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, TheTriple, OSABI, isLittle);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, true);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, false);
}

MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, true);
}

MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU) {
  return createARMAsmBackend(T, MRI, TT, CPU, false);
}