//===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMAsmBackend.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMAsmBackendDarwin.h"
#include "MCTargetDesc/ARMAsmBackendELF.h"
#include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMFixupKinds.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

namespace {
class ARMELFObjectWriter : public MCELFObjectTargetWriter {
public:
  ARMELFObjectWriter(uint8_t OSABI)
      : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
                                /*HasRelocationAddend*/ false) {}
};
} // end anonymous namespace

const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
  const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 0, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate, but scattered into two chunks: bits
      // 0 - 11 and 16 - 19.
      {"fixup_arm_movt_hi16", 0, 20, 0},
      {"fixup_arm_movw_lo16", 0, 20, 0},
      {"fixup_t2_movt_hi16", 0, 20, 0},
      {"fixup_t2_movw_lo16", 0, 20, 0},
      {"fixup_arm_mod_imm", 0, 12, 0},
      {"fixup_t2_so_imm", 0, 26, 0},
  };
  const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
      // This table *must* be in the order that the fixup_* kinds are defined in
      // ARMFixupKinds.h.
      //
      // Name                      Offset (bits) Size (bits)     Flags
      {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_ldst_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_10", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_pcrel_9", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_thumb_adr_pcrel_10", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_adr_pcrel_12", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_blx", 0, 32,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
      {"fixup_arm_thumb_cp", 8, 8,
       MCFixupKindInfo::FKF_IsPCRel |
           MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
      {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: 16-bit immediate, but scattered into two chunks: bits
      // 0 - 11 and 16 - 19.
      {"fixup_arm_movt_hi16", 12, 20, 0},
      {"fixup_arm_movw_lo16", 12, 20, 0},
      {"fixup_t2_movt_hi16", 12, 20, 0},
      {"fixup_t2_movw_lo16", 12, 20, 0},
      {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 6, 26, 0},
  };

  if (Kind < FirstTargetFixupKind)
    return MCAsmBackend::getFixupKindInfo(Kind);

  assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
         "Invalid kind!");
  return (IsLittleEndian ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
}

void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
  switch (Flag) {
  default:
    break;
  case MCAF_Code16:
    setIsThumb(true);
    break;
  case MCAF_Code32:
    setIsThumb(false);
    break;
  }
}

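// Return the opcode of the wider Thumb2 instruction that a narrow 16-bit
// Thumb instruction can be relaxed to, or Op itself if no wider encoding is
// needed. For example, on a Thumb2 target the conditional branch tBcc
// (roughly a +/-256 byte range) is widened to t2Bcc, which reaches about
// +/-1 MiB.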
unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
  bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];
  bool HasV8MBaselineOps = STI->getFeatureBits()[ARM::HasV8MBaselineOps];

  switch (Op) {
  default:
    return Op;
  case ARM::tBcc:
    return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
  case ARM::tLDRpci:
    return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
  case ARM::tADR:
    return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
  case ARM::tB:
    return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
  case ARM::tCBZ:
    return ARM::tHINT;
  case ARM::tCBNZ:
    return ARM::tHINT;
  }
}

bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
    return true;
  return false;
}

const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
                                                    uint64_t Value) const {
  switch ((unsigned)Fixup.getKind()) {
  case ARM::fixup_arm_thumb_br: {
    // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value doesn't fit in that signed 12-bit range.
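    // For example, a tB whose target is 2052 bytes past the branch gives
    // Offset == 2048, which is out of range, so the branch is relaxed to t2B.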
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 2046 || Offset < -2048)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_bcc: {
    // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
    // low bit being an implied zero. There's an implied +4 offset for the
    // branch, so we adjust the other way here to determine what's
    // encodable.
    //
    // Relax if the value is too big for a (signed) i8.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset > 254 || Offset < -256)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp: {
    // If the immediate is negative, greater than 1020, or not a multiple
    // of four, the wide version of the instruction must be used.
    int64_t Offset = int64_t(Value) - 4;
    if (Offset & 3)
      return "misaligned pc-relative fixup value";
    else if (Offset > 1020 || Offset < 0)
      return "out of range pc-relative fixup value";
    break;
  }
  case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction.
    // It will be changed to a NOP.
    int64_t Offset = (Value & ~1);
    if (Offset == 2)
      return "will be converted to nop";
    break;
  }
  default:
    llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
  }
  return nullptr;
}

bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  return reasonForFixupRelaxation(Fixup, Value);
}

void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
                                     const MCSubtargetInfo &STI,
                                     MCInst &Res) const {
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
  // have to change the operands too.
  if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
      RelaxedOp == ARM::tHINT) {
    Res.setOpcode(RelaxedOp);
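    // Build "hint #0" (a NOP) with an always-execute (AL, 14) predicate and
    // no predicate register.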
    Res.addOperand(MCOperand::createImm(0));
    Res.addOperand(MCOperand::createImm(14));
    Res.addOperand(MCOperand::createReg(0));
    return;
  }

  // The rest of the instructions we're relaxing have the same operands.
  // We just need to update to the proper opcode.
  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
  const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
  const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
  const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
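  // Pad with NOP instructions, then zero-fill any remainder smaller than one
  // NOP. For example, 10 bytes of ARM-mode padding become two 4-byte NOPs
  // followed by two zero bytes.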
  if (isThumb()) {
    const uint16_t nopEncoding =
        hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
    uint64_t NumNops = Count / 2;
    for (uint64_t i = 0; i != NumNops; ++i)
      OW->write16(nopEncoding);
    if (Count & 1)
      OW->write8(0);
    return true;
  }
  // ARM mode
  const uint32_t nopEncoding =
      hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
  uint64_t NumNops = Count / 4;
  for (uint64_t i = 0; i != NumNops; ++i)
    OW->write32(nopEncoding);
  // FIXME: should this function return false when unable to write exactly
  // 'Count' bytes with NOP encodings?
  switch (Count % 4) {
  default:
    break; // No leftover bytes to write
  case 1:
    OW->write8(0);
    break;
  case 2:
    OW->write16(0);
    break;
  case 3:
    OW->write16(0);
    OW->write8(0);
    break;
  }

  return true;
}

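// A 32-bit Thumb2 instruction is stored as two 16-bit halfwords with the
// high-order halfword first. Fixup values are computed as a single 32-bit
// quantity, so for little-endian output the two halfwords have to be
// exchanged before being OR'd into the instruction bytes; for example,
// swapHalfWords(0xF000D000, /*IsLittleEndian=*/true) yields 0xD000F000.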
static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
  if (IsLittleEndian) {
    // Note that the halfwords are stored high first and low second in thumb;
    // so we need to swap the fixup value here to map properly.
    uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
    Swapped |= (Value & 0x0000FFFF) << 16;
    return Swapped;
  } else
    return Value;
}

static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
                              bool IsLittleEndian) {
  uint32_t Value;

  if (IsLittleEndian) {
    Value = (SecondHalf & 0xFFFF) << 16;
    Value |= (FirstHalf & 0xFFFF);
  } else {
    Value = (SecondHalf & 0xFFFF);
    Value |= (FirstHalf & 0xFFFF) << 16;
  }

  return Value;
}

unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
                                         const MCFixup &Fixup,
                                         const MCValue &Target, uint64_t Value,
                                         bool IsResolved, MCContext &Ctx,
                                         bool IsLittleEndian) const {
  unsigned Kind = Fixup.getKind();

  // MachO tries to make .o files that look vaguely pre-linked, so for MOVW/MOVT
  // and .word relocations they put the Thumb bit into the addend if possible.
  // Other relocation types don't want this bit though (branches couldn't encode
  // it if it *was* present, and no other relocations exist) and it can
  // interfere with checking valid expressions.
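  // For example, a MachO .word reference to a Thumb function ordinarily gets
  // the low bit set in the written value, so an indirect branch through that
  // word stays in Thumb state.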
  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    if (A->hasSubsectionsViaSymbols() && Asm.isThumbFunc(&A->getSymbol()) &&
        (Kind == FK_Data_4 || Kind == ARM::fixup_arm_movw_lo16 ||
         Kind == ARM::fixup_arm_movt_hi16 || Kind == ARM::fixup_t2_movw_lo16 ||
         Kind == ARM::fixup_t2_movt_hi16))
      Value |= 1;
  }

  switch (Kind) {
  default:
    Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
    return 0;
  case FK_Data_1:
  case FK_Data_2:
  case FK_Data_4:
    return Value;
  case FK_SecRel_2:
    return Value;
  case FK_SecRel_4:
    return Value;
  case ARM::fixup_arm_movt_hi16:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_arm_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned Lo12 = Value & 0x0FFF;
    // inst{19-16} = Hi4;
    // inst{11-0} = Lo12;
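    // e.g. Value == 0x1234 gives Hi4 == 0x1 and Lo12 == 0x234, so the
    // returned value is 0x10234.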
    Value = (Hi4 << 16) | (Lo12);
    return Value;
  }
  case ARM::fixup_t2_movt_hi16:
    if (IsResolved || !STI->getTargetTriple().isOSBinFormatELF())
      Value >>= 16;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_movw_lo16: {
    unsigned Hi4 = (Value & 0xF000) >> 12;
    unsigned i = (Value & 0x800) >> 11;
    unsigned Mid3 = (Value & 0x700) >> 8;
    unsigned Lo8 = Value & 0x0FF;
    // inst{19-16} = Hi4;
    // inst{26} = i;
    // inst{14-12} = Mid3;
    // inst{7-0} = Lo8;
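    // e.g. Value == 0x1234 gives Hi4 == 0x1, i == 0, Mid3 == 0x2 and
    // Lo8 == 0x34, i.e. 0x12034 before the halfword swap below.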
    Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
    return swapHalfWords(Value, IsLittleEndian);
  }
  case ARM::fixup_arm_ldst_pcrel_12:
    // ARM PC-relative values are offset by 8.
    Value -= 4;
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_ldst_pcrel_12: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value -= 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    if (Value >= 4096) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10,
    // but with 16-bit halfwords swapped.
    if (Kind == ARM::fixup_t2_ldst_pcrel_12)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_adr_pcrel_12: {
    // ARM PC-relative values are offset by 8.
    Value -= 8;
    unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 2; // 0b0010
    }
    if (ARM_AM::getSOImmVal(Value) == -1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Encode the immediate and shift the opcode into place.
    return ARM_AM::getSOImmVal(Value) | (opc << 21);
  }

  case ARM::fixup_t2_adr_pcrel_12: {
    Value -= 4;
    unsigned opc = 0;
    if ((int64_t)Value < 0) {
      Value = -Value;
      opc = 5;
    }

    uint32_t out = (opc << 21);
    out |= (Value & 0x800) << 15;
    out |= (Value & 0x700) << 4;
    out |= (Value & 0x0FF);

    return swapHalfWords(out, IsLittleEndian);
  }

  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
    // These values don't encode the low two bits since they're always zero.
    // Offset by 8 just as above.
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        return 0;
    return 0xffffff & ((Value - 8) >> 2);
  case ARM::fixup_t2_uncondbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint32_t out = 0;
    bool I = Value & 0x800000;
    bool J1 = Value & 0x400000;
    bool J2 = Value & 0x200000;
    J1 ^= I;
    J2 ^= I;

    out |= I << 26;                 // S bit
    out |= !J1 << 13;               // J1 bit
    out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
    out |= (Value & 0x0007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_t2_condbranch: {
    Value = Value - 4;
    Value >>= 1; // Low bit is not encoded.

    uint64_t out = 0;
    out |= (Value & 0x80000) << 7; // S bit
    out |= (Value & 0x40000) >> 7; // J2 bit
    out |= (Value & 0x20000) >> 4; // J1 bit
    out |= (Value & 0x1F800) << 5; // imm6 field
    out |= (Value & 0x007FF);      // imm11 field

    return swapHalfWords(out, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_bl: {
    // FIXME: We get both thumb1 and thumb2 in here, so we can only check for
    // the less strict thumb2 value.
    if (!isInt<26>(Value - 4)) {
      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
      return 0;
    }

    // The value doesn't encode the low bit (always zero) and is offset by
    // four. The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit
    //
    //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
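    // For example, a forward branch of 64 bytes (Value == 64) gives
    // offset == 30, so S == 0, J1 == J2 == 1, imm10 == 0 and imm11 == 0x1E.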
    uint32_t offset = (Value - 4) >> 1;
    uint32_t signBit = (offset & 0x800000) >> 23;
    uint32_t I1Bit = (offset & 0x400000) >> 22;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x200000) >> 21;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
    uint32_t imm11Bits = (offset & 0x000007FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           (uint16_t)imm11Bits);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_arm_thumb_blx: {
    // The value doesn't encode the low two bits (always zero) and is offset by
    // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
    //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
    // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
    // The value is encoded into disjoint bit positions in the destination
    // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
    // J = either J1 or J2 bit, 0 = zero.
    //
    //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
    //
    // Note that the halfwords are stored high first, low second; so we need
    // to transpose the fixup value here to map properly.
    if (Value % 4 != 0) {
      Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
      return 0;
    }

    uint32_t offset = (Value - 4) >> 2;
    if (const MCSymbolRefExpr *SRE =
            dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
      if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
        offset = 0;
    uint32_t signBit = (offset & 0x400000) >> 22;
    uint32_t I1Bit = (offset & 0x200000) >> 21;
    uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
    uint32_t I2Bit = (offset & 0x100000) >> 20;
    uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
    uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
    uint32_t imm10LBits = (offset & 0x3FF);

    uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
    uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
                           ((uint16_t)imm10LBits) << 1);
    return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
  }
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_cp:
    // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
    // could have an error on our hands.
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    // Offset by 4, and don't encode the low two bits.
    return ((Value - 4) >> 2) & 0xff;
  case ARM::fixup_arm_thumb_cb: {
    // CB instructions can only branch to offsets in [4, 126] in multiples of 2
    // so ensure that the raw value LSB is zero and it lies in [2, 130].
    // An offset of 2 will be relaxed to a NOP.
    if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    // Offset by 4 and don't encode the lower bit, which is always 0.
    // FIXME: diagnose if no Thumb2
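    // e.g. Value == 8 gives Binary == 2, so i == 0 and imm5 == 2, and the
    // returned encoding is 0x10.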
    uint32_t Binary = (Value - 4) >> 1;
    return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
  }
  case ARM::fixup_arm_thumb_br:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
        !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0x7ff;
  case ARM::fixup_arm_thumb_bcc:
    // Offset by 4 and don't encode the lower bit, which is always 0.
    if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
      const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
      if (FixupDiagnostic) {
        Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
        return 0;
      }
    }
    return ((Value - 4) >> 1) & 0xff;
  case ARM::fixup_arm_pcrel_10_unscaled: {
    Value = Value - 8; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value = (Value & 0xf) | ((Value & 0xf0) << 4);
    return Value | (isAdd << 23);
  }
  case ARM::fixup_arm_pcrel_10:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_10: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low two bits since they're always zero.
    Value >>= 2;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_10)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_pcrel_9:
    Value = Value - 4; // ARM fixups offset by an additional word and don't
                       // need to adjust for the half-word ordering.
    LLVM_FALLTHROUGH;
  case ARM::fixup_t2_pcrel_9: {
    // Offset by 4, adjusted by two due to the half-word ordering of thumb.
    Value = Value - 4;
    bool isAdd = true;
    if ((int64_t)Value < 0) {
      Value = -Value;
      isAdd = false;
    }
    // These values don't encode the low bit since it's always zero.
    if (Value & 1) {
      Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
      return 0;
    }
    Value >>= 1;
    if (Value >= 256) {
      Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
      return 0;
    }
    Value |= isAdd << 23;

    // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
    // swapped.
    if (Kind == ARM::fixup_t2_pcrel_9)
      return swapHalfWords(Value, IsLittleEndian);

    return Value;
  }
  case ARM::fixup_arm_mod_imm:
    Value = ARM_AM::getSOImmVal(Value);
    if (Value >> 12) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    return Value;
  case ARM::fixup_t2_so_imm: {
    Value = ARM_AM::getT2SOImmVal(Value);
    if ((int64_t)Value < 0) {
      Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
      return 0;
    }
    // Value will contain a 12-bit value broken up into a 4-bit shift in bits
    // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
    // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
    // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
    // half-word.
    uint64_t EncValue = 0;
    EncValue |= (Value & 0x800) << 15;
    EncValue |= (Value & 0x700) << 4;
    EncValue |= (Value & 0xff);
    return swapHalfWords(EncValue, IsLittleEndian);
  }
  }
}

bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                          const MCFixup &Fixup,
                                          const MCValue &Target) {
  const MCSymbolRefExpr *A = Target.getSymA();
  const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
  if ((unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
    assert(Sym && "How did we resolve this?");

    // If the symbol is external the linker will handle it.
    // FIXME: Should we handle it as an optimization?

    // If the symbol is out of range, produce a relocation and hope the
    // linker can handle it. GNU AS produces an error in this case.
    if (Sym->isExternal())
      return true;
  }
  // Create relocations for unconditional branches to function symbols with
  // different execution mode in ELF binaries.
  if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
    if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
      if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
        return true;
      if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
                                    FixupKind == ARM::fixup_arm_thumb_bl ||
                                    FixupKind == ARM::fixup_t2_condbranch ||
                                    FixupKind == ARM::fixup_t2_uncondbranch))
        return true;
    }
  }
  // We must always generate a relocation for BL/BLX instructions if we have
  // a symbol to reference, as the linker relies on knowing the destination
  // symbol's thumb-ness to get interworking right.
  if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
            FixupKind == ARM::fixup_arm_blx ||
            FixupKind == ARM::fixup_arm_uncondbl ||
            FixupKind == ARM::fixup_arm_condbl))
    return true;
  return false;
}

/// getFixupKindNumBytes - The number of bytes the fixup may change.
static unsigned getFixupKindNumBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
    return 1;

  case FK_Data_2:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
  case ARM::fixup_arm_mod_imm:
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_pcrel_9:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
    return 3;

  case FK_Data_4:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_pcrel_9:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_t2_so_imm:
    return 4;

  case FK_SecRel_2:
    return 2;
  case FK_SecRel_4:
    return 4;
  }
}

/// getFixupKindContainerSizeBytes - The number of bytes of the instruction or
/// data container the fixup lives in; used to index bytes from the most
/// significant end when writing big-endian output.
static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("Unknown fixup kind!");

  case FK_Data_1:
    return 1;
  case FK_Data_2:
    return 2;
  case FK_Data_4:
    return 4;

  case ARM::fixup_arm_thumb_bcc:
  case ARM::fixup_arm_thumb_cp:
  case ARM::fixup_thumb_adr_pcrel_10:
  case ARM::fixup_arm_thumb_br:
  case ARM::fixup_arm_thumb_cb:
    // Instruction size is 2 bytes.
    return 2;

  case ARM::fixup_arm_pcrel_10_unscaled:
  case ARM::fixup_arm_ldst_pcrel_12:
  case ARM::fixup_arm_pcrel_10:
  case ARM::fixup_arm_adr_pcrel_12:
  case ARM::fixup_arm_uncondbl:
  case ARM::fixup_arm_condbl:
  case ARM::fixup_arm_blx:
  case ARM::fixup_arm_condbranch:
  case ARM::fixup_arm_uncondbranch:
  case ARM::fixup_t2_ldst_pcrel_12:
  case ARM::fixup_t2_condbranch:
  case ARM::fixup_t2_uncondbranch:
  case ARM::fixup_t2_pcrel_10:
  case ARM::fixup_t2_adr_pcrel_12:
  case ARM::fixup_arm_thumb_bl:
  case ARM::fixup_arm_thumb_blx:
  case ARM::fixup_arm_movt_hi16:
  case ARM::fixup_arm_movw_lo16:
  case ARM::fixup_t2_movt_hi16:
  case ARM::fixup_t2_movw_lo16:
  case ARM::fixup_arm_mod_imm:
  case ARM::fixup_t2_so_imm:
    // Instruction size is 4 bytes.
    return 4;
  }
}

void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
                               const MCValue &Target,
                               MutableArrayRef<char> Data, uint64_t Value,
                               bool IsResolved) const {
  unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
  MCContext &Ctx = Asm.getContext();
  Value = adjustFixupValue(Asm, Fixup, Target, Value, IsResolved, Ctx,
                           IsLittleEndian);
  if (!Value)
    return; // Doesn't change encoding.

  unsigned Offset = Fixup.getOffset();
  assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");

  // Used to point to big endian bytes.
  unsigned FullSizeBytes;
  if (!IsLittleEndian) {
    FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
    assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
    assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
  }

  // For each byte of the fragment that the fixup touches, mask in the bits from
  // the fixup value. The Value has been "split up" into the appropriate
  // bitfields above.
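  // For example, a 3-byte fixup in a 4-byte big-endian ARM instruction writes
  // byte i of the value to Data[Offset + 3 - i].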
  for (unsigned i = 0; i != NumBytes; ++i) {
    unsigned Idx = IsLittleEndian ? i : (FullSizeBytes - 1 - i);
    Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
  }
}

namespace CU {

/// \brief Compact unwind encoding values.
enum CompactUnwindEncodings {
  UNWIND_ARM_MODE_MASK                         = 0x0F000000,
  UNWIND_ARM_MODE_FRAME                        = 0x01000000,
  UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
  UNWIND_ARM_MODE_DWARF                        = 0x04000000,

  UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,

  UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
  UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
  UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,

  UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
  UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
  UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
  UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
  UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,

  UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,

  UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
};

} // end CU namespace

/// Generate compact unwind encoding for the function based on the CFI
/// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF which
/// tells the runtime to fall back and unwind using DWARF.
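///
/// For example, a standard armv7k prologue of
///   push {r4, r5, r7, lr}
///   add  r7, sp, #8
/// saves lr at CFA-4, r7 at CFA-8, r5 at CFA-12 and r4 at CFA-16, and is
/// encoded as UNWIND_ARM_MODE_FRAME | FIRST_PUSH_R5 | FIRST_PUSH_R4
/// (0x01000003).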
uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
    ArrayRef<MCCFIInstruction> Instrs) const {
  DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
  // Only armv7k uses CFI based unwinding.
  if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
    return 0;
  // No .cfi directives means no frame.
  if (Instrs.empty())
    return 0;
  // Start off assuming CFA is at SP+0.
  int CFARegister = ARM::SP;
  int CFARegisterOffset = 0;
  // Mark savable registers as initially unsaved
  DenseMap<unsigned, int> RegOffsets;
  int FloatRegCount = 0;
  // Process each .cfi directive and build up compact unwind info.
  for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
    int Reg;
    const MCCFIInstruction &Inst = Instrs[i];
    switch (Inst.getOperation()) {
    case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
      CFARegisterOffset = -Inst.getOffset();
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
      CFARegisterOffset = -Inst.getOffset();
      break;
    case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
      CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
      break;
    case MCCFIInstruction::OpOffset: // DW_CFA_offset
      Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        RegOffsets[Reg] = Inst.getOffset();
      else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
        RegOffsets[Reg] = Inst.getOffset();
        ++FloatRegCount;
      } else {
        DEBUG_WITH_TYPE("compact-unwind",
                        llvm::dbgs() << ".cfi_offset on unknown register="
                                     << Inst.getRegister() << "\n");
        return CU::UNWIND_ARM_MODE_DWARF;
      }
      break;
    case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
      // Ignore
      break;
    default:
      // Directive not convertible to compact unwind, bail out.
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs()
                          << "CFI directive not compatible with compact "
                             "unwind encoding, opcode=" << Inst.getOperation()
                          << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
      break;
    }
  }

  // If no frame set up, return no unwind info.
  if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
    return 0;

  // Verify standard frame (lr/r7) was used.
  if (CFARegister != ARM::R7) {
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
                                                   << CFARegister
                                                   << " instead of r7\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  int StackAdjust = CFARegisterOffset - 8;
  if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs()
                        << "LR not saved as standard frame, StackAdjust="
                        << StackAdjust
                        << ", CFARegisterOffset=" << CFARegisterOffset
                        << ", lr save at offset=" << RegOffsets[14] << "\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "r7 not saved as standard frame\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }
  uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;

  // If var-args are used, there may be a stack adjust required.
  switch (StackAdjust) {
  case 0:
    break;
  case 4:
    CompactUnwindEncoding |= 0x00400000;
    break;
  case 8:
    CompactUnwindEncoding |= 0x00800000;
    break;
  case 12:
    CompactUnwindEncoding |= 0x00C00000;
    break;
  default:
    DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
                                          << ".cfi_def_cfa stack adjust ("
                                          << StackAdjust << ") out of range\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // If r6 is saved, it must be right below r7.
  static struct {
    unsigned Reg;
    unsigned Encoding;
  } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
                   {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
                   {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
                   {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
                   {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
                   {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
                   {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
                   {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};

  int CurOffset = -8 - StackAdjust;
  for (auto CSReg : GPRCSRegs) {
    auto Offset = RegOffsets.find(CSReg.Reg);
    if (Offset == RegOffsets.end())
      continue;

    int RegOffset = Offset->second;
    if (RegOffset != CurOffset - 4) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
                                   << RegOffset << " but only supported at "
                                   << CurOffset << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CompactUnwindEncoding |= CSReg.Encoding;
    CurOffset -= 4;
  }

  // If no floats saved, we are done.
  if (FloatRegCount == 0)
    return CompactUnwindEncoding;

  // Switch mode to include D register saving.
  CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
  CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;

  // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
  // but needs coordination with the linker and libunwind.
  if (FloatRegCount > 4) {
    DEBUG_WITH_TYPE("compact-unwind",
                    llvm::dbgs() << "unsupported number of D registers saved ("
                                 << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
  }

  // Floating point registers must either be saved sequentially, or we defer to
  // DWARF. No gaps allowed here so check that each saved d-register is
  // precisely where it should be.
  static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
  for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
    auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
    if (Offset == RegOffsets.end()) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " not saved\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    } else if (Offset->second != CurOffset - 8) {
      DEBUG_WITH_TYPE("compact-unwind",
                      llvm::dbgs() << FloatRegCount << " D-regs saved, but "
                                   << MRI.getName(FPRCSRegs[Idx])
                                   << " saved at " << Offset->second
                                   << ", expected at " << CurOffset - 8
                                   << "\n");
      return CU::UNWIND_ARM_MODE_DWARF;
    }
    CurOffset -= 8;
  }

  return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
}

static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
  ARM::ArchKind AK = ARM::parseArch(Arch);
  switch (AK) {
  default:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV4T:
    return MachO::CPU_SUBTYPE_ARM_V4T;
  case ARM::ArchKind::ARMV5T:
  case ARM::ArchKind::ARMV5TE:
  case ARM::ArchKind::ARMV5TEJ:
    return MachO::CPU_SUBTYPE_ARM_V5;
  case ARM::ArchKind::ARMV6:
  case ARM::ArchKind::ARMV6K:
    return MachO::CPU_SUBTYPE_ARM_V6;
  case ARM::ArchKind::ARMV7A:
    return MachO::CPU_SUBTYPE_ARM_V7;
  case ARM::ArchKind::ARMV7S:
    return MachO::CPU_SUBTYPE_ARM_V7S;
  case ARM::ArchKind::ARMV7K:
    return MachO::CPU_SUBTYPE_ARM_V7K;
  case ARM::ArchKind::ARMV6M:
    return MachO::CPU_SUBTYPE_ARM_V6M;
  case ARM::ArchKind::ARMV7M:
    return MachO::CPU_SUBTYPE_ARM_V7M;
  case ARM::ArchKind::ARMV7EM:
    return MachO::CPU_SUBTYPE_ARM_V7EM;
  }
}

MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
                                        const MCRegisterInfo &MRI,
                                        const Triple &TheTriple, StringRef CPU,
                                        const MCTargetOptions &Options,
                                        bool isLittle) {
  switch (TheTriple.getObjectFormat()) {
  default:
    llvm_unreachable("unsupported object format");
  case Triple::MachO: {
    MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
    return new ARMAsmBackendDarwin(T, TheTriple, MRI, CS);
  }
  case Triple::COFF:
    assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
    return new ARMAsmBackendWinCOFF(T, TheTriple);
  case Triple::ELF:
    assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
    uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
    return new ARMAsmBackendELF(T, TheTriple, OSABI, isLittle);
  }
}

MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, true);
}

MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
                                          const MCRegisterInfo &MRI,
                                          const Triple &TT, StringRef CPU,
                                          const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, false);
}

MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU,
                                            const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, true);
}

MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
                                            const MCRegisterInfo &MRI,
                                            const Triple &TT, StringRef CPU,
                                            const MCTargetOptions &Options) {
  return createARMAsmBackend(T, MRI, TT, CPU, Options, false);
}