1 //===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 10 #include "AArch64.h" 11 #include "AArch64RegisterInfo.h" 12 #include "MCTargetDesc/AArch64FixupKinds.h" 13 #include "llvm/ADT/Triple.h" 14 #include "llvm/MC/MCAssembler.h" 15 #include "llvm/MC/MCAsmBackend.h" 16 #include "llvm/MC/MCContext.h" 17 #include "llvm/MC/MCDirectives.h" 18 #include "llvm/MC/MCELFObjectWriter.h" 19 #include "llvm/MC/MCFixupKindInfo.h" 20 #include "llvm/MC/MCObjectWriter.h" 21 #include "llvm/MC/MCSectionELF.h" 22 #include "llvm/MC/MCSectionMachO.h" 23 #include "llvm/MC/MCValue.h" 24 #include "llvm/Support/ErrorHandling.h" 25 #include "llvm/Support/MachO.h" 26 using namespace llvm; 27 28 namespace { 29 30 class AArch64AsmBackend : public MCAsmBackend { 31 static const unsigned PCRelFlagVal = 32 MCFixupKindInfo::FKF_IsAlignedDownTo32Bits | MCFixupKindInfo::FKF_IsPCRel; 33 public: 34 bool IsLittleEndian; 35 36 public: 37 AArch64AsmBackend(const Target &T, bool IsLittleEndian) 38 : MCAsmBackend(), IsLittleEndian(IsLittleEndian) {} 39 40 unsigned getNumFixupKinds() const override { 41 return AArch64::NumTargetFixupKinds; 42 } 43 44 const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override { 45 const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = { 46 // This table *must* be in the order that the fixup_* kinds are defined in 47 // AArch64FixupKinds.h. 
48 // 49 // Name Offset (bits) Size (bits) Flags 50 { "fixup_aarch64_pcrel_adr_imm21", 0, 32, PCRelFlagVal }, 51 { "fixup_aarch64_pcrel_adrp_imm21", 0, 32, PCRelFlagVal }, 52 { "fixup_aarch64_add_imm12", 10, 12, 0 }, 53 { "fixup_aarch64_ldst_imm12_scale1", 10, 12, 0 }, 54 { "fixup_aarch64_ldst_imm12_scale2", 10, 12, 0 }, 55 { "fixup_aarch64_ldst_imm12_scale4", 10, 12, 0 }, 56 { "fixup_aarch64_ldst_imm12_scale8", 10, 12, 0 }, 57 { "fixup_aarch64_ldst_imm12_scale16", 10, 12, 0 }, 58 { "fixup_aarch64_ldr_pcrel_imm19", 5, 19, PCRelFlagVal }, 59 { "fixup_aarch64_movw", 5, 16, 0 }, 60 { "fixup_aarch64_pcrel_branch14", 5, 14, PCRelFlagVal }, 61 { "fixup_aarch64_pcrel_branch19", 5, 19, PCRelFlagVal }, 62 { "fixup_aarch64_pcrel_branch26", 0, 26, PCRelFlagVal }, 63 { "fixup_aarch64_pcrel_call26", 0, 26, PCRelFlagVal }, 64 { "fixup_aarch64_tlsdesc_call", 0, 0, 0 } 65 }; 66 67 if (Kind < FirstTargetFixupKind) 68 return MCAsmBackend::getFixupKindInfo(Kind); 69 70 assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && 71 "Invalid kind!"); 72 return Infos[Kind - FirstTargetFixupKind]; 73 } 74 75 void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, 76 uint64_t Value, bool IsPCRel) const override; 77 78 bool mayNeedRelaxation(const MCInst &Inst) const override; 79 bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value, 80 const MCRelaxableFragment *DF, 81 const MCAsmLayout &Layout) const override; 82 void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI, 83 MCInst &Res) const override; 84 bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override; 85 86 void HandleAssemblerFlag(MCAssemblerFlag Flag) {} 87 88 unsigned getPointerSize() const { return 8; } 89 90 unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const; 91 }; 92 93 } // end anonymous namespace 94 95 /// \brief The number of bytes the fixup may change. 
96 static unsigned getFixupKindNumBytes(unsigned Kind) { 97 switch (Kind) { 98 default: 99 llvm_unreachable("Unknown fixup kind!"); 100 101 case AArch64::fixup_aarch64_tlsdesc_call: 102 return 0; 103 104 case FK_Data_1: 105 return 1; 106 107 case FK_Data_2: 108 case AArch64::fixup_aarch64_movw: 109 return 2; 110 111 case AArch64::fixup_aarch64_pcrel_branch14: 112 case AArch64::fixup_aarch64_add_imm12: 113 case AArch64::fixup_aarch64_ldst_imm12_scale1: 114 case AArch64::fixup_aarch64_ldst_imm12_scale2: 115 case AArch64::fixup_aarch64_ldst_imm12_scale4: 116 case AArch64::fixup_aarch64_ldst_imm12_scale8: 117 case AArch64::fixup_aarch64_ldst_imm12_scale16: 118 case AArch64::fixup_aarch64_ldr_pcrel_imm19: 119 case AArch64::fixup_aarch64_pcrel_branch19: 120 return 3; 121 122 case AArch64::fixup_aarch64_pcrel_adr_imm21: 123 case AArch64::fixup_aarch64_pcrel_adrp_imm21: 124 case AArch64::fixup_aarch64_pcrel_branch26: 125 case AArch64::fixup_aarch64_pcrel_call26: 126 case FK_Data_4: 127 return 4; 128 129 case FK_Data_8: 130 return 8; 131 } 132 } 133 134 static unsigned AdrImmBits(unsigned Value) { 135 unsigned lo2 = Value & 0x3; 136 unsigned hi19 = (Value & 0x1ffffc) >> 2; 137 return (hi19 << 5) | (lo2 << 29); 138 } 139 140 static uint64_t adjustFixupValue(const MCFixup &Fixup, uint64_t Value, 141 MCContext *Ctx) { 142 unsigned Kind = Fixup.getKind(); 143 int64_t SignedValue = static_cast<int64_t>(Value); 144 switch (Kind) { 145 default: 146 llvm_unreachable("Unknown fixup kind!"); 147 case AArch64::fixup_aarch64_pcrel_adr_imm21: 148 if (Ctx && (SignedValue > 2097151 || SignedValue < -2097152)) 149 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 150 return AdrImmBits(Value & 0x1fffffULL); 151 case AArch64::fixup_aarch64_pcrel_adrp_imm21: 152 return AdrImmBits((Value & 0x1fffff000ULL) >> 12); 153 case AArch64::fixup_aarch64_ldr_pcrel_imm19: 154 case AArch64::fixup_aarch64_pcrel_branch19: 155 // Signed 21-bit immediate 156 if (SignedValue > 2097151 || 
SignedValue < -2097152) 157 if (Ctx) Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 158 if (Ctx && (Value & 0x3)) 159 Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned"); 160 // Low two bits are not encoded. 161 return (Value >> 2) & 0x7ffff; 162 case AArch64::fixup_aarch64_add_imm12: 163 case AArch64::fixup_aarch64_ldst_imm12_scale1: 164 // Unsigned 12-bit immediate 165 if (Ctx && Value >= 0x1000) 166 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 167 return Value; 168 case AArch64::fixup_aarch64_ldst_imm12_scale2: 169 // Unsigned 12-bit immediate which gets multiplied by 2 170 if (Ctx && (Value >= 0x2000)) 171 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 172 if (Ctx && (Value & 0x1)) 173 Ctx->reportError(Fixup.getLoc(), "fixup must be 2-byte aligned"); 174 return Value >> 1; 175 case AArch64::fixup_aarch64_ldst_imm12_scale4: 176 // Unsigned 12-bit immediate which gets multiplied by 4 177 if (Ctx && (Value >= 0x4000)) 178 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 179 if (Ctx && (Value & 0x3)) 180 Ctx->reportError(Fixup.getLoc(), "fixup must be 4-byte aligned"); 181 return Value >> 2; 182 case AArch64::fixup_aarch64_ldst_imm12_scale8: 183 // Unsigned 12-bit immediate which gets multiplied by 8 184 if (Ctx && (Value >= 0x8000)) 185 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 186 if (Ctx && (Value & 0x7)) 187 Ctx->reportError(Fixup.getLoc(), "fixup must be 8-byte aligned"); 188 return Value >> 3; 189 case AArch64::fixup_aarch64_ldst_imm12_scale16: 190 // Unsigned 12-bit immediate which gets multiplied by 16 191 if (Ctx && (Value >= 0x10000)) 192 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 193 if (Ctx && (Value & 0xf)) 194 Ctx->reportError(Fixup.getLoc(), "fixup must be 16-byte aligned"); 195 return Value >> 4; 196 case AArch64::fixup_aarch64_movw: 197 if (Ctx) 198 Ctx->reportError(Fixup.getLoc(), 199 "no resolvable MOVZ/MOVK fixups supported yet"); 
200 return Value; 201 case AArch64::fixup_aarch64_pcrel_branch14: 202 // Signed 16-bit immediate 203 if (Ctx && (SignedValue > 32767 || SignedValue < -32768)) 204 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 205 // Low two bits are not encoded (4-byte alignment assumed). 206 if (Ctx && (Value & 0x3)) 207 Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned"); 208 return (Value >> 2) & 0x3fff; 209 case AArch64::fixup_aarch64_pcrel_branch26: 210 case AArch64::fixup_aarch64_pcrel_call26: 211 // Signed 28-bit immediate 212 if (Ctx && (SignedValue > 134217727 || SignedValue < -134217728)) 213 Ctx->reportError(Fixup.getLoc(), "fixup value out of range"); 214 // Low two bits are not encoded (4-byte alignment assumed). 215 if (Ctx && (Value & 0x3)) 216 Ctx->reportError(Fixup.getLoc(), "fixup not sufficiently aligned"); 217 return (Value >> 2) & 0x3ffffff; 218 case FK_Data_1: 219 case FK_Data_2: 220 case FK_Data_4: 221 case FK_Data_8: 222 return Value; 223 } 224 } 225 226 /// getFixupKindContainereSizeInBytes - The number of bytes of the 227 /// container involved in big endian or 0 if the item is little endian 228 unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const { 229 if (IsLittleEndian) 230 return 0; 231 232 switch (Kind) { 233 default: 234 llvm_unreachable("Unknown fixup kind!"); 235 236 case FK_Data_1: 237 return 1; 238 case FK_Data_2: 239 return 2; 240 case FK_Data_4: 241 return 4; 242 case FK_Data_8: 243 return 8; 244 245 case AArch64::fixup_aarch64_tlsdesc_call: 246 case AArch64::fixup_aarch64_movw: 247 case AArch64::fixup_aarch64_pcrel_branch14: 248 case AArch64::fixup_aarch64_add_imm12: 249 case AArch64::fixup_aarch64_ldst_imm12_scale1: 250 case AArch64::fixup_aarch64_ldst_imm12_scale2: 251 case AArch64::fixup_aarch64_ldst_imm12_scale4: 252 case AArch64::fixup_aarch64_ldst_imm12_scale8: 253 case AArch64::fixup_aarch64_ldst_imm12_scale16: 254 case AArch64::fixup_aarch64_ldr_pcrel_imm19: 255 case 
AArch64::fixup_aarch64_pcrel_branch19: 256 case AArch64::fixup_aarch64_pcrel_adr_imm21: 257 case AArch64::fixup_aarch64_pcrel_adrp_imm21: 258 case AArch64::fixup_aarch64_pcrel_branch26: 259 case AArch64::fixup_aarch64_pcrel_call26: 260 // Instructions are always little endian 261 return 0; 262 } 263 } 264 265 void AArch64AsmBackend::applyFixup(const MCFixup &Fixup, char *Data, 266 unsigned DataSize, uint64_t Value, 267 bool IsPCRel) const { 268 unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind()); 269 if (!Value) 270 return; // Doesn't change encoding. 271 MCFixupKindInfo Info = getFixupKindInfo(Fixup.getKind()); 272 // Apply any target-specific value adjustments. 273 Value = adjustFixupValue(Fixup, Value, nullptr); 274 275 // Shift the value into position. 276 Value <<= Info.TargetOffset; 277 278 unsigned Offset = Fixup.getOffset(); 279 assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!"); 280 281 // Used to point to big endian bytes. 282 unsigned FulleSizeInBytes = getFixupKindContainereSizeInBytes(Fixup.getKind()); 283 284 // For each byte of the fragment that the fixup touches, mask in the 285 // bits from the fixup value. 
286 if (FulleSizeInBytes == 0) { 287 // Handle as little-endian 288 for (unsigned i = 0; i != NumBytes; ++i) { 289 Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); 290 } 291 } else { 292 // Handle as big-endian 293 assert((Offset + FulleSizeInBytes) <= DataSize && "Invalid fixup size!"); 294 assert(NumBytes <= FulleSizeInBytes && "Invalid fixup size!"); 295 for (unsigned i = 0; i != NumBytes; ++i) { 296 unsigned Idx = FulleSizeInBytes - 1 - i; 297 Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff); 298 } 299 } 300 } 301 302 bool AArch64AsmBackend::mayNeedRelaxation(const MCInst &Inst) const { 303 return false; 304 } 305 306 bool AArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, 307 uint64_t Value, 308 const MCRelaxableFragment *DF, 309 const MCAsmLayout &Layout) const { 310 // FIXME: This isn't correct for AArch64. Just moving the "generic" logic 311 // into the targets for now. 312 // 313 // Relax if the value is too big for a (signed) i8. 314 return int64_t(Value) != int64_t(int8_t(Value)); 315 } 316 317 void AArch64AsmBackend::relaxInstruction(const MCInst &Inst, 318 const MCSubtargetInfo &STI, 319 MCInst &Res) const { 320 llvm_unreachable("AArch64AsmBackend::relaxInstruction() unimplemented"); 321 } 322 323 bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { 324 // If the count is not 4-byte aligned, we must be writing data into the text 325 // section (otherwise we have unaligned instructions, and thus have far 326 // bigger problems), so just write zeros instead. 327 OW->WriteZeros(Count % 4); 328 329 // We are properly aligned, so write NOPs as requested. 330 Count /= 4; 331 for (uint64_t i = 0; i != Count; ++i) 332 OW->write32(0xd503201f); 333 return true; 334 } 335 336 namespace { 337 338 namespace CU { 339 340 /// \brief Compact unwind encoding values. 341 enum CompactUnwindEncodings { 342 /// \brief A "frameless" leaf function, where no non-volatile registers are 343 /// saved. 
  /// The return remains in LR throughout the function.
  UNWIND_ARM64_MODE_FRAMELESS = 0x02000000,

  /// \brief No compact unwind encoding available. Instead the low 23-bits of
  /// the compact unwind encoding is the offset of the DWARF FDE in the
  /// __eh_frame section. This mode is never used in object files. It is only
  /// generated by the linker in final linked images, which have only DWARF info
  /// for a function.
  UNWIND_ARM64_MODE_DWARF = 0x03000000,

  /// \brief This is a standard arm64 prologue where FP/LR are immediately
  /// pushed on the stack, then SP is copied to FP. If there are any
  /// non-volatile register saved, they are copied into the stack frame in
  /// pairs in a contiguous range right below the saved FP/LR pair. Any subset
  /// of the five X pairs and four D pairs can be saved, but the memory layout
  /// must be in register number order.
  UNWIND_ARM64_MODE_FRAME = 0x04000000,

  /// \brief Frame register pair encodings.
  UNWIND_ARM64_FRAME_X19_X20_PAIR = 0x00000001,
  UNWIND_ARM64_FRAME_X21_X22_PAIR = 0x00000002,
  UNWIND_ARM64_FRAME_X23_X24_PAIR = 0x00000004,
  UNWIND_ARM64_FRAME_X25_X26_PAIR = 0x00000008,
  UNWIND_ARM64_FRAME_X27_X28_PAIR = 0x00000010,
  UNWIND_ARM64_FRAME_D8_D9_PAIR = 0x00000100,
  UNWIND_ARM64_FRAME_D10_D11_PAIR = 0x00000200,
  UNWIND_ARM64_FRAME_D12_D13_PAIR = 0x00000400,
  UNWIND_ARM64_FRAME_D14_D15_PAIR = 0x00000800
};

} // end CU namespace

// FIXME: This should be in a separate file.
/// MachO (Darwin) flavor of the backend: adds the MachO object writer and
/// compact-unwind encoding generation.
class DarwinAArch64AsmBackend : public AArch64AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Encode compact unwind stack adjustment for frameless functions.
  /// See UNWIND_ARM64_FRAMELESS_STACK_SIZE_MASK in compact_unwind_encoding.h.
  /// The stack size always needs to be 16 byte aligned.
  uint32_t encodeStackAdjustment(uint32_t StackSize) const {
    return (StackSize / 16) << 12;
  }

public:
  DarwinAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI)
      : AArch64AsmBackend(T, /*IsLittleEndian*/true), MRI(MRI) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAArch64MachObjectWriter(OS, MachO::CPU_TYPE_ARM64,
                                         MachO::CPU_SUBTYPE_ARM64_ALL);
  }

  /// \brief Generate the compact unwind encoding from the CFI directives.
  /// Falls back to UNWIND_ARM64_MODE_DWARF for any prologue shape the compact
  /// format cannot represent.
  uint32_t generateCompactUnwindEncoding(
      ArrayRef<MCCFIInstruction> Instrs) const override {
    if (Instrs.empty())
      return CU::UNWIND_ARM64_MODE_FRAMELESS;

    bool HasFP = false;
    unsigned StackSize = 0;

    uint32_t CompactUnwindEncoding = 0;
    for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Cannot handle this directive: bail out.
        return CU::UNWIND_ARM64_MODE_DWARF;
      case MCCFIInstruction::OpDefCfa: {
        // Defines a frame pointer. Expects the two following CFI directives
        // to be the LR and FP pushes, and consumes them here.
        assert(getXRegFromWReg(MRI.getLLVMRegNum(Inst.getRegister(), true)) ==
                   AArch64::FP &&
               "Invalid frame pointer!");
        assert(i + 2 < e && "Insufficient CFI instructions to define a frame!");

        const MCCFIInstruction &LRPush = Instrs[++i];
        assert(LRPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Link register not pushed!");
        const MCCFIInstruction &FPPush = Instrs[++i];
        assert(FPPush.getOperation() == MCCFIInstruction::OpOffset &&
               "Frame pointer not pushed!");

        unsigned LRReg = MRI.getLLVMRegNum(LRPush.getRegister(), true);
        unsigned FPReg = MRI.getLLVMRegNum(FPPush.getRegister(), true);

        LRReg = getXRegFromWReg(LRReg);
        FPReg = getXRegFromWReg(FPReg);

        assert(LRReg == AArch64::LR && FPReg == AArch64::FP &&
               "Pushing invalid registers for frame!");

        // Indicate that the function has a frame.
        CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAME;
        HasFP = true;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        assert(StackSize == 0 && "We already have the CFA offset!");
        StackSize = std::abs(Inst.getOffset());
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Registers are saved in pairs. We expect there to be two consecutive
        // `.cfi_offset' instructions with the appropriate registers specified.
        unsigned Reg1 = MRI.getLLVMRegNum(Inst.getRegister(), true);
        if (i + 1 == e)
          return CU::UNWIND_ARM64_MODE_DWARF;

        const MCCFIInstruction &Inst2 = Instrs[++i];
        if (Inst2.getOperation() != MCCFIInstruction::OpOffset)
          return CU::UNWIND_ARM64_MODE_DWARF;
        unsigned Reg2 = MRI.getLLVMRegNum(Inst2.getRegister(), true);

        // N.B. The encodings must be in register number order, and the X
        // registers before the D registers. The mask in each guard below
        // rejects the pair if any higher-numbered pair bit is already set,
        // enforcing that ordering.

        // X19/X20 pair = 0x00000001,
        // X21/X22 pair = 0x00000002,
        // X23/X24 pair = 0x00000004,
        // X25/X26 pair = 0x00000008,
        // X27/X28 pair = 0x00000010
        Reg1 = getXRegFromWReg(Reg1);
        Reg2 = getXRegFromWReg(Reg2);

        if (Reg1 == AArch64::X19 && Reg2 == AArch64::X20 &&
            (CompactUnwindEncoding & 0xF1E) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X19_X20_PAIR;
        else if (Reg1 == AArch64::X21 && Reg2 == AArch64::X22 &&
                 (CompactUnwindEncoding & 0xF1C) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X21_X22_PAIR;
        else if (Reg1 == AArch64::X23 && Reg2 == AArch64::X24 &&
                 (CompactUnwindEncoding & 0xF18) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X23_X24_PAIR;
        else if (Reg1 == AArch64::X25 && Reg2 == AArch64::X26 &&
                 (CompactUnwindEncoding & 0xF10) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X25_X26_PAIR;
        else if (Reg1 == AArch64::X27 && Reg2 == AArch64::X28 &&
                 (CompactUnwindEncoding & 0xF00) == 0)
          CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_X27_X28_PAIR;
        else {
          // Not an X pair: re-interpret the registers as D registers.
          Reg1 = getDRegFromBReg(Reg1);
          Reg2 = getDRegFromBReg(Reg2);

          // D8/D9 pair = 0x00000100,
          // D10/D11 pair = 0x00000200,
          // D12/D13 pair = 0x00000400,
          // D14/D15 pair = 0x00000800
          if (Reg1 == AArch64::D8 && Reg2 == AArch64::D9 &&
              (CompactUnwindEncoding & 0xE00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D8_D9_PAIR;
          else if (Reg1 == AArch64::D10 && Reg2 == AArch64::D11 &&
                   (CompactUnwindEncoding & 0xC00) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D10_D11_PAIR;
          else if (Reg1 == AArch64::D12 && Reg2 == AArch64::D13 &&
                   (CompactUnwindEncoding & 0x800) == 0)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D12_D13_PAIR;
          else if (Reg1 == AArch64::D14 && Reg2 == AArch64::D15)
            CompactUnwindEncoding |= CU::UNWIND_ARM64_FRAME_D14_D15_PAIR;
          else
            // A pair was pushed which we cannot handle.
            return CU::UNWIND_ARM64_MODE_DWARF;
        }

        break;
      }
      }
    }

    if (!HasFP) {
      // With compact unwind info we can only represent stack adjustments of up
      // to 65520 bytes.
      if (StackSize > 65520)
        return CU::UNWIND_ARM64_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_ARM64_MODE_FRAMELESS;
      CompactUnwindEncoding |= encodeStackAdjustment(StackSize);
    }

    return CompactUnwindEncoding;
  }

  void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override {
    // Try to get the encoded value for the fixup as-if we're mapping it into
    // the instruction. This allows adjustFixupValue() to issue a diagnostic
    // if the value is invalid.
    if (IsResolved)
      (void)adjustFixupValue(Fixup, Value, &Asm.getContext());
  }
};

} // end anonymous namespace

namespace {

/// ELF flavor of the backend: adds the ELF object writer and forces ADRP
/// page-delta fixups out to relocations.
class ELFAArch64AsmBackend : public AArch64AsmBackend {
public:
  uint8_t OSABI;
  bool IsILP32;

  ELFAArch64AsmBackend(const Target &T, uint8_t OSABI, bool IsLittleEndian,
                       bool IsILP32)
      : AArch64AsmBackend(T, IsLittleEndian), OSABI(OSABI), IsILP32(IsILP32) {}

  MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
    return createAArch64ELFObjectWriter(OS, OSABI, IsLittleEndian, IsILP32);
  }

  void processFixupValue(const MCAssembler &Asm, const MCAsmLayout &Layout,
                         const MCFixup &Fixup, const MCFragment *DF,
                         const MCValue &Target, uint64_t &Value,
                         bool &IsResolved) override;
};

void ELFAArch64AsmBackend::processFixupValue(
    const MCAssembler &Asm, const MCAsmLayout &Layout, const MCFixup &Fixup,
    const MCFragment *DF, const MCValue &Target, uint64_t &Value,
    bool &IsResolved) {
  // The ADRP instruction adds some multiple of 0x1000 to the current PC &
  // ~0xfff. This means that the required offset to reach a symbol can vary by
  // up to one step depending on where the ADRP is in memory. For example:
  //
  //     ADRP x0, there
  //  there:
  //
  // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and
  // we'll need that as an offset. At any other address "there" will be in the
  // same page as the ADRP and the instruction should encode 0x0. Assuming the
  // section isn't 0x1000-aligned, we therefore need to delegate this decision
  // to the linker -- a relocation!
  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
    IsResolved = false;

  // Try to get the encoded value for the fixup as-if we're mapping it into
  // the instruction. This allows adjustFixupValue() to issue a diagnostic
  // if the value is invalid.
582 if (IsResolved) 583 (void)adjustFixupValue(Fixup, Value, &Asm.getContext()); 584 } 585 586 } 587 588 MCAsmBackend *llvm::createAArch64leAsmBackend(const Target &T, 589 const MCRegisterInfo &MRI, 590 const Triple &TheTriple, 591 StringRef CPU, 592 const MCTargetOptions &Options) { 593 if (TheTriple.isOSBinFormatMachO()) 594 return new DarwinAArch64AsmBackend(T, MRI); 595 596 assert(TheTriple.isOSBinFormatELF() && "Expect either MachO or ELF target"); 597 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); 598 bool IsILP32 = Options.getABIName() == "ilp32"; 599 return new ELFAArch64AsmBackend(T, OSABI, /*IsLittleEndian=*/true, IsILP32); 600 } 601 602 MCAsmBackend *llvm::createAArch64beAsmBackend(const Target &T, 603 const MCRegisterInfo &MRI, 604 const Triple &TheTriple, 605 StringRef CPU, 606 const MCTargetOptions &Options) { 607 assert(TheTriple.isOSBinFormatELF() && 608 "Big endian is only supported for ELF targets!"); 609 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); 610 bool IsILP32 = Options.getABIName() == "ilp32"; 611 return new ELFAArch64AsmBackend(T, OSABI, /*IsLittleEndian=*/false, IsILP32); 612 } 613