1 //===-- ARMAsmBackend.cpp - ARM Assembler Backend -------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "MCTargetDesc/ARMAsmBackend.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMAsmBackendDarwin.h"
13 #include "MCTargetDesc/ARMAsmBackendELF.h"
14 #include "MCTargetDesc/ARMAsmBackendWinCOFF.h"
15 #include "MCTargetDesc/ARMBaseInfo.h"
16 #include "MCTargetDesc/ARMFixupKinds.h"
17 #include "MCTargetDesc/ARMMCTargetDesc.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/BinaryFormat/ELF.h"
20 #include "llvm/BinaryFormat/MachO.h"
21 #include "llvm/MC/MCAsmBackend.h"
22 #include "llvm/MC/MCAssembler.h"
23 #include "llvm/MC/MCContext.h"
24 #include "llvm/MC/MCDirectives.h"
25 #include "llvm/MC/MCELFObjectWriter.h"
26 #include "llvm/MC/MCExpr.h"
27 #include "llvm/MC/MCFixupKindInfo.h"
28 #include "llvm/MC/MCMachObjectWriter.h"
29 #include "llvm/MC/MCObjectWriter.h"
30 #include "llvm/MC/MCRegisterInfo.h"
31 #include "llvm/MC/MCSectionELF.h"
32 #include "llvm/MC/MCSectionMachO.h"
33 #include "llvm/MC/MCSubtargetInfo.h"
34 #include "llvm/MC/MCValue.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/ErrorHandling.h"
37 #include "llvm/Support/Format.h"
38 #include "llvm/Support/TargetParser.h"
39 #include "llvm/Support/raw_ostream.h"
40 using namespace llvm;
41 
42 namespace {
43 class ARMELFObjectWriter : public MCELFObjectTargetWriter {
44 public:
45   ARMELFObjectWriter(uint8_t OSABI)
46       : MCELFObjectTargetWriter(/*Is64Bit*/ false, OSABI, ELF::EM_ARM,
47                                 /*HasRelocationAddend*/ false) {}
48 };
49 } // end anonymous namespace
50 
51 const MCFixupKindInfo &ARMAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
52   const static MCFixupKindInfo InfosLE[ARM::NumTargetFixupKinds] = {
53       // This table *must* be in the order that the fixup_* kinds are defined in
54       // ARMFixupKinds.h.
55       //
56       // Name                      Offset (bits) Size (bits)     Flags
57       {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
58       {"fixup_t2_ldst_pcrel_12", 0, 32,
59        MCFixupKindInfo::FKF_IsPCRel |
60            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
61       {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
62       {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
63       {"fixup_t2_pcrel_10", 0, 32,
64        MCFixupKindInfo::FKF_IsPCRel |
65            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
66       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
67       {"fixup_t2_pcrel_9", 0, 32,
68        MCFixupKindInfo::FKF_IsPCRel |
69            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
70       {"fixup_thumb_adr_pcrel_10", 0, 8,
71        MCFixupKindInfo::FKF_IsPCRel |
72            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
73       {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
74       {"fixup_t2_adr_pcrel_12", 0, 32,
75        MCFixupKindInfo::FKF_IsPCRel |
76            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
77       {"fixup_arm_condbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
78       {"fixup_arm_uncondbranch", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
79       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
80       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
81       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
82       {"fixup_arm_uncondbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
83       {"fixup_arm_condbl", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
84       {"fixup_arm_blx", 0, 24, MCFixupKindInfo::FKF_IsPCRel},
85       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
86       {"fixup_arm_thumb_blx", 0, 32,
87        MCFixupKindInfo::FKF_IsPCRel |
88            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
89       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
90       {"fixup_arm_thumb_cp", 0, 8,
91        MCFixupKindInfo::FKF_IsPCRel |
92            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
93       {"fixup_arm_thumb_bcc", 0, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: a 16-bit immediate split into two fields: bits 0-11 and
      // bits 16-19.
96       {"fixup_arm_movt_hi16", 0, 20, 0},
97       {"fixup_arm_movw_lo16", 0, 20, 0},
98       {"fixup_t2_movt_hi16", 0, 20, 0},
99       {"fixup_t2_movw_lo16", 0, 20, 0},
100       {"fixup_arm_mod_imm", 0, 12, 0},
101       {"fixup_t2_so_imm", 0, 26, 0},
102   };
103   const static MCFixupKindInfo InfosBE[ARM::NumTargetFixupKinds] = {
104       // This table *must* be in the order that the fixup_* kinds are defined in
105       // ARMFixupKinds.h.
106       //
107       // Name                      Offset (bits) Size (bits)     Flags
108       {"fixup_arm_ldst_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
109       {"fixup_t2_ldst_pcrel_12", 0, 32,
110        MCFixupKindInfo::FKF_IsPCRel |
111            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
112       {"fixup_arm_pcrel_10_unscaled", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
113       {"fixup_arm_pcrel_10", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
114       {"fixup_t2_pcrel_10", 0, 32,
115        MCFixupKindInfo::FKF_IsPCRel |
116            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
117       {"fixup_arm_pcrel_9", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
118       {"fixup_t2_pcrel_9", 0, 32,
119        MCFixupKindInfo::FKF_IsPCRel |
120            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
121       {"fixup_thumb_adr_pcrel_10", 8, 8,
122        MCFixupKindInfo::FKF_IsPCRel |
123            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
124       {"fixup_arm_adr_pcrel_12", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
125       {"fixup_t2_adr_pcrel_12", 0, 32,
126        MCFixupKindInfo::FKF_IsPCRel |
127            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
128       {"fixup_arm_condbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
129       {"fixup_arm_uncondbranch", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
130       {"fixup_t2_condbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
131       {"fixup_t2_uncondbranch", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
132       {"fixup_arm_thumb_br", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
133       {"fixup_arm_uncondbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
134       {"fixup_arm_condbl", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
135       {"fixup_arm_blx", 8, 24, MCFixupKindInfo::FKF_IsPCRel},
136       {"fixup_arm_thumb_bl", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
137       {"fixup_arm_thumb_blx", 0, 32,
138        MCFixupKindInfo::FKF_IsPCRel |
139            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
140       {"fixup_arm_thumb_cb", 0, 16, MCFixupKindInfo::FKF_IsPCRel},
141       {"fixup_arm_thumb_cp", 8, 8,
142        MCFixupKindInfo::FKF_IsPCRel |
143            MCFixupKindInfo::FKF_IsAlignedDownTo32Bits},
144       {"fixup_arm_thumb_bcc", 8, 8, MCFixupKindInfo::FKF_IsPCRel},
      // movw / movt: a 16-bit immediate split into two fields: bits 0-11 and
      // bits 16-19.
147       {"fixup_arm_movt_hi16", 12, 20, 0},
148       {"fixup_arm_movw_lo16", 12, 20, 0},
149       {"fixup_t2_movt_hi16", 12, 20, 0},
150       {"fixup_t2_movw_lo16", 12, 20, 0},
151       {"fixup_arm_mod_imm", 20, 12, 0},
      {"fixup_t2_so_imm", 6, 26, 0},
153   };
154 
155   if (Kind < FirstTargetFixupKind)
156     return MCAsmBackend::getFixupKindInfo(Kind);
157 
158   assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
159          "Invalid kind!");
160   return (IsLittleEndian ? InfosLE : InfosBE)[Kind - FirstTargetFixupKind];
161 }
162 
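// Track .code 16 / .code 32 assembler directives so later decisions (e.g.
// which NOP encoding to use when padding) know whether Thumb or ARM
// instructions are being emitted.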
163 void ARMAsmBackend::handleAssemblerFlag(MCAssemblerFlag Flag) {
164   switch (Flag) {
165   default:
166     break;
167   case MCAF_Code16:
168     setIsThumb(true);
169     break;
170   case MCAF_Code32:
171     setIsThumb(false);
172     break;
173   }
174 }
175 
176 unsigned ARMAsmBackend::getRelaxedOpcode(unsigned Op) const {
177   bool HasThumb2 = STI->getFeatureBits()[ARM::FeatureThumb2];
178   bool HasV8MBaselineOps = STI->getFeatureBits()[ARM::HasV8MBaselineOps];
179 
180   switch (Op) {
181   default:
182     return Op;
183   case ARM::tBcc:
184     return HasThumb2 ? (unsigned)ARM::t2Bcc : Op;
185   case ARM::tLDRpci:
186     return HasThumb2 ? (unsigned)ARM::t2LDRpci : Op;
187   case ARM::tADR:
188     return HasThumb2 ? (unsigned)ARM::t2ADR : Op;
189   case ARM::tB:
190     return HasV8MBaselineOps ? (unsigned)ARM::t2B : Op;
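  // tCBZ/tCBNZ have no wide equivalent; when their target turns out to be
  // unencodable (e.g. the very next instruction) they are rewritten as a
  // 16-bit NOP (tHINT) in relaxInstruction().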
191   case ARM::tCBZ:
192     return ARM::tHINT;
193   case ARM::tCBNZ:
194     return ARM::tHINT;
195   }
196 }
197 
198 bool ARMAsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
199   if (getRelaxedOpcode(Inst.getOpcode()) != Inst.getOpcode())
200     return true;
201   return false;
202 }
203 
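// Returns a diagnostic string if the fixup value cannot be encoded by the
// narrow (16-bit) instruction and relaxation is required, or nullptr if the
// value fits as-is.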
204 const char *ARMAsmBackend::reasonForFixupRelaxation(const MCFixup &Fixup,
205                                                     uint64_t Value) const {
206   switch ((unsigned)Fixup.getKind()) {
207   case ARM::fixup_arm_thumb_br: {
208     // Relaxing tB to t2B. tB has a signed 12-bit displacement with the
209     // low bit being an implied zero. There's an implied +4 offset for the
210     // branch, so we adjust the other way here to determine what's
211     // encodable.
212     //
    // Relax if the value is out of range for the signed 12-bit displacement.
214     int64_t Offset = int64_t(Value) - 4;
215     if (Offset > 2046 || Offset < -2048)
216       return "out of range pc-relative fixup value";
217     break;
218   }
219   case ARM::fixup_arm_thumb_bcc: {
220     // Relaxing tBcc to t2Bcc. tBcc has a signed 9-bit displacement with the
221     // low bit being an implied zero. There's an implied +4 offset for the
222     // branch, so we adjust the other way here to determine what's
223     // encodable.
224     //
    // Relax if the value is out of range for the signed 9-bit displacement.
226     int64_t Offset = int64_t(Value) - 4;
227     if (Offset > 254 || Offset < -256)
228       return "out of range pc-relative fixup value";
229     break;
230   }
231   case ARM::fixup_thumb_adr_pcrel_10:
232   case ARM::fixup_arm_thumb_cp: {
233     // If the immediate is negative, greater than 1020, or not a multiple
234     // of four, the wide version of the instruction must be used.
235     int64_t Offset = int64_t(Value) - 4;
236     if (Offset & 3)
237       return "misaligned pc-relative fixup value";
238     else if (Offset > 1020 || Offset < 0)
239       return "out of range pc-relative fixup value";
240     break;
241   }
242   case ARM::fixup_arm_thumb_cb: {
    // If we have a Thumb CBZ or CBNZ instruction and its target is the next
    // instruction, it is actually out of range for the instruction.
    // It will be changed to a NOP.
246     int64_t Offset = (Value & ~1);
247     if (Offset == 2)
248       return "will be converted to nop";
249     break;
250   }
251   default:
252     llvm_unreachable("Unexpected fixup kind in reasonForFixupRelaxation()!");
253   }
254   return nullptr;
255 }
256 
257 bool ARMAsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
258                                          const MCRelaxableFragment *DF,
259                                          const MCAsmLayout &Layout) const {
260   return reasonForFixupRelaxation(Fixup, Value);
261 }
262 
263 void ARMAsmBackend::relaxInstruction(const MCInst &Inst,
264                                      const MCSubtargetInfo &STI,
265                                      MCInst &Res) const {
266   unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
267 
268   // Sanity check w/ diagnostic if we get here w/ a bogus instruction.
269   if (RelaxedOp == Inst.getOpcode()) {
270     SmallString<256> Tmp;
271     raw_svector_ostream OS(Tmp);
272     Inst.dump_pretty(OS);
273     OS << "\n";
274     report_fatal_error("unexpected instruction to relax: " + OS.str());
275   }
276 
277   // If we are changing Thumb CBZ or CBNZ instruction to a NOP, aka tHINT, we
278   // have to change the operands too.
279   if ((Inst.getOpcode() == ARM::tCBZ || Inst.getOpcode() == ARM::tCBNZ) &&
280       RelaxedOp == ARM::tHINT) {
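    // tHINT #0 is a NOP; the trailing operands are the AL condition code and
    // a null predicate register.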
281     Res.setOpcode(RelaxedOp);
282     Res.addOperand(MCOperand::createImm(0));
283     Res.addOperand(MCOperand::createImm(14));
284     Res.addOperand(MCOperand::createReg(0));
285     return;
286   }
287 
288   // The rest of instructions we're relaxing have the same operands.
289   // We just need to update to the proper opcode.
290   Res = Inst;
291   Res.setOpcode(RelaxedOp);
292 }
293 
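// Fill 'Count' bytes with NOP encodings appropriate for the current mode
// (Thumb vs. ARM) and architecture level; trailing bytes that cannot hold a
// complete NOP are zero-filled.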
294 bool ARMAsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
295   const uint16_t Thumb1_16bitNopEncoding = 0x46c0; // using MOV r8,r8
296   const uint16_t Thumb2_16bitNopEncoding = 0xbf00; // NOP
297   const uint32_t ARMv4_NopEncoding = 0xe1a00000;   // using MOV r0,r0
298   const uint32_t ARMv6T2_NopEncoding = 0xe320f000; // NOP
299   if (isThumb()) {
300     const uint16_t nopEncoding =
301         hasNOP() ? Thumb2_16bitNopEncoding : Thumb1_16bitNopEncoding;
302     uint64_t NumNops = Count / 2;
303     for (uint64_t i = 0; i != NumNops; ++i)
304       OW->write16(nopEncoding);
305     if (Count & 1)
306       OW->write8(0);
307     return true;
308   }
309   // ARM mode
310   const uint32_t nopEncoding =
311       hasNOP() ? ARMv6T2_NopEncoding : ARMv4_NopEncoding;
312   uint64_t NumNops = Count / 4;
313   for (uint64_t i = 0; i != NumNops; ++i)
314     OW->write32(nopEncoding);
315   // FIXME: should this function return false when unable to write exactly
316   // 'Count' bytes with NOP encodings?
317   switch (Count % 4) {
318   default:
319     break; // No leftover bytes to write
320   case 1:
321     OW->write8(0);
322     break;
323   case 2:
324     OW->write16(0);
325     break;
326   case 3:
327     OW->write16(0);
    OW->write8(0);
329     break;
330   }
331 
332   return true;
333 }
334 
335 static uint32_t swapHalfWords(uint32_t Value, bool IsLittleEndian) {
336   if (IsLittleEndian) {
337     // Note that the halfwords are stored high first and low second in thumb;
338     // so we need to swap the fixup value here to map properly.
339     uint32_t Swapped = (Value & 0xFFFF0000) >> 16;
340     Swapped |= (Value & 0x0000FFFF) << 16;
341     return Swapped;
342   } else
343     return Value;
344 }
345 
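// Combine the first and second 16-bit halfwords of a 32-bit Thumb instruction
// fixup into a single value laid out in the order applyFixup() will write the
// bytes.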
346 static uint32_t joinHalfWords(uint32_t FirstHalf, uint32_t SecondHalf,
347                               bool IsLittleEndian) {
348   uint32_t Value;
349 
350   if (IsLittleEndian) {
351     Value = (SecondHalf & 0xFFFF) << 16;
352     Value |= (FirstHalf & 0xFFFF);
353   } else {
354     Value = (SecondHalf & 0xFFFF);
355     Value |= (FirstHalf & 0xFFFF) << 16;
356   }
357 
358   return Value;
359 }
360 
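// Convert a resolved fixup value into the raw bit pattern expected at the
// fixup location, reporting out-of-range or misaligned values through Ctx.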
361 unsigned ARMAsmBackend::adjustFixupValue(const MCFixup &Fixup, uint64_t Value,
362                                          bool IsPCRel, MCContext &Ctx,
363                                          bool IsLittleEndian,
364                                          bool IsResolved) const {
365   unsigned Kind = Fixup.getKind();
366   switch (Kind) {
367   default:
368     Ctx.reportError(Fixup.getLoc(), "bad relocation fixup type");
369     return 0;
370   case FK_Data_1:
371   case FK_Data_2:
372   case FK_Data_4:
373     return Value;
374   case FK_SecRel_2:
375     return Value;
376   case FK_SecRel_4:
377     return Value;
378   case ARM::fixup_arm_movt_hi16:
379     if (!IsPCRel)
380       Value >>= 16;
381     LLVM_FALLTHROUGH;
382   case ARM::fixup_arm_movw_lo16: {
383     unsigned Hi4 = (Value & 0xF000) >> 12;
384     unsigned Lo12 = Value & 0x0FFF;
385     // inst{19-16} = Hi4;
386     // inst{11-0} = Lo12;
387     Value = (Hi4 << 16) | (Lo12);
388     return Value;
389   }
390   case ARM::fixup_t2_movt_hi16:
391     if (!IsPCRel)
392       Value >>= 16;
393     LLVM_FALLTHROUGH;
394   case ARM::fixup_t2_movw_lo16: {
395     unsigned Hi4 = (Value & 0xF000) >> 12;
396     unsigned i = (Value & 0x800) >> 11;
397     unsigned Mid3 = (Value & 0x700) >> 8;
398     unsigned Lo8 = Value & 0x0FF;
399     // inst{19-16} = Hi4;
400     // inst{26} = i;
401     // inst{14-12} = Mid3;
402     // inst{7-0} = Lo8;
403     Value = (Hi4 << 16) | (i << 26) | (Mid3 << 12) | (Lo8);
404     return swapHalfWords(Value, IsLittleEndian);
405   }
406   case ARM::fixup_arm_ldst_pcrel_12:
407     // ARM PC-relative values are offset by 8.
408     Value -= 4;
409     LLVM_FALLTHROUGH;
410   case ARM::fixup_t2_ldst_pcrel_12: {
411     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
412     Value -= 4;
413     bool isAdd = true;
414     if ((int64_t)Value < 0) {
415       Value = -Value;
416       isAdd = false;
417     }
418     if (Value >= 4096) {
419       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
420       return 0;
421     }
422     Value |= isAdd << 23;
423 
424     // Same addressing mode as fixup_arm_pcrel_10,
425     // but with 16-bit halfwords swapped.
426     if (Kind == ARM::fixup_t2_ldst_pcrel_12)
427       return swapHalfWords(Value, IsLittleEndian);
428 
429     return Value;
430   }
431   case ARM::fixup_arm_adr_pcrel_12: {
432     // ARM PC-relative values are offset by 8.
433     Value -= 8;
434     unsigned opc = 4; // bits {24-21}. Default to add: 0b0100
435     if ((int64_t)Value < 0) {
436       Value = -Value;
437       opc = 2; // 0b0010
438     }
439     if (ARM_AM::getSOImmVal(Value) == -1) {
440       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
441       return 0;
442     }
443     // Encode the immediate and shift the opcode into place.
444     return ARM_AM::getSOImmVal(Value) | (opc << 21);
445   }
446 
447   case ARM::fixup_t2_adr_pcrel_12: {
448     Value -= 4;
449     unsigned opc = 0;
450     if ((int64_t)Value < 0) {
451       Value = -Value;
452       opc = 5;
453     }
454 
455     uint32_t out = (opc << 21);
456     out |= (Value & 0x800) << 15;
457     out |= (Value & 0x700) << 4;
458     out |= (Value & 0x0FF);
459 
460     return swapHalfWords(out, IsLittleEndian);
461   }
462 
463   case ARM::fixup_arm_condbranch:
464   case ARM::fixup_arm_uncondbranch:
465   case ARM::fixup_arm_uncondbl:
466   case ARM::fixup_arm_condbl:
467   case ARM::fixup_arm_blx:
468     // These values don't encode the low two bits since they're always zero.
469     // Offset by 8 just as above.
470     if (const MCSymbolRefExpr *SRE =
471             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
472       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
473         return 0;
474     return 0xffffff & ((Value - 8) >> 2);
475   case ARM::fixup_t2_uncondbranch: {
476     Value = Value - 4;
477     Value >>= 1; // Low bit is not encoded.
478 
479     uint32_t out = 0;
480     bool I = Value & 0x800000;
481     bool J1 = Value & 0x400000;
482     bool J2 = Value & 0x200000;
483     J1 ^= I;
484     J2 ^= I;
485 
486     out |= I << 26;                 // S bit
487     out |= !J1 << 13;               // J1 bit
488     out |= !J2 << 11;               // J2 bit
    out |= (Value & 0x1FF800) << 5; // imm10 field
490     out |= (Value & 0x0007FF);      // imm11 field
491 
492     return swapHalfWords(out, IsLittleEndian);
493   }
494   case ARM::fixup_t2_condbranch: {
495     Value = Value - 4;
496     Value >>= 1; // Low bit is not encoded.
497 
498     uint64_t out = 0;
499     out |= (Value & 0x80000) << 7; // S bit
500     out |= (Value & 0x40000) >> 7; // J2 bit
501     out |= (Value & 0x20000) >> 4; // J1 bit
502     out |= (Value & 0x1F800) << 5; // imm6 field
503     out |= (Value & 0x007FF);      // imm11 field
504 
505     return swapHalfWords(out, IsLittleEndian);
506   }
507   case ARM::fixup_arm_thumb_bl: {
508     // The value doesn't encode the low bit (always zero) and is offset by
509     // four. The 32-bit immediate value is encoded as
510     //   imm32 = SignExtend(S:I1:I2:imm10:imm11:0)
511     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
512     // The value is encoded into disjoint bit positions in the destination
513     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
514     // J = either J1 or J2 bit
515     //
516     //   BL:  xxxxxSIIIIIIIIII xxJxJIIIIIIIIIII
517     //
518     // Note that the halfwords are stored high first, low second; so we need
519     // to transpose the fixup value here to map properly.
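    //
    // Worked example (illustrative): Value == 0x1000 gives
    //   offset = (0x1000 - 4) >> 1 = 0x7FE, so S = I1 = I2 = 0, J1 = J2 = 1,
    //   imm10 = 0 and imm11 = 0x7FE; the two fixup halfwords are then 0x0000
    //   and 0x2FFE, which get OR'd into the BL opcode bits.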
520     uint32_t offset = (Value - 4) >> 1;
521     uint32_t signBit = (offset & 0x800000) >> 23;
522     uint32_t I1Bit = (offset & 0x400000) >> 22;
523     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
524     uint32_t I2Bit = (offset & 0x200000) >> 21;
525     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
526     uint32_t imm10Bits = (offset & 0x1FF800) >> 11;
527     uint32_t imm11Bits = (offset & 0x000007FF);
528 
529     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10Bits);
530     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
531                            (uint16_t)imm11Bits);
532     return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
533   }
534   case ARM::fixup_arm_thumb_blx: {
535     // The value doesn't encode the low two bits (always zero) and is offset by
536     // four (see fixup_arm_thumb_cp). The 32-bit immediate value is encoded as
537     //   imm32 = SignExtend(S:I1:I2:imm10H:imm10L:00)
538     // where I1 = NOT(J1 ^ S) and I2 = NOT(J2 ^ S).
539     // The value is encoded into disjoint bit positions in the destination
540     // opcode. x = unchanged, I = immediate value bit, S = sign extension bit,
541     // J = either J1 or J2 bit, 0 = zero.
542     //
543     //   BLX: xxxxxSIIIIIIIIII xxJxJIIIIIIIIII0
544     //
545     // Note that the halfwords are stored high first, low second; so we need
546     // to transpose the fixup value here to map properly.
547     if (Value % 4 != 0) {
548       Ctx.reportError(Fixup.getLoc(), "misaligned ARM call destination");
549       return 0;
550     }
551 
552     uint32_t offset = (Value - 4) >> 2;
553     if (const MCSymbolRefExpr *SRE =
554             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
555       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
556         offset = 0;
557     uint32_t signBit = (offset & 0x400000) >> 22;
558     uint32_t I1Bit = (offset & 0x200000) >> 21;
559     uint32_t J1Bit = (I1Bit ^ 0x1) ^ signBit;
560     uint32_t I2Bit = (offset & 0x100000) >> 20;
561     uint32_t J2Bit = (I2Bit ^ 0x1) ^ signBit;
562     uint32_t imm10HBits = (offset & 0xFFC00) >> 10;
563     uint32_t imm10LBits = (offset & 0x3FF);
564 
565     uint32_t FirstHalf = (((uint16_t)signBit << 10) | (uint16_t)imm10HBits);
566     uint32_t SecondHalf = (((uint16_t)J1Bit << 13) | ((uint16_t)J2Bit << 11) |
567                            ((uint16_t)imm10LBits) << 1);
568     return joinHalfWords(FirstHalf, SecondHalf, IsLittleEndian);
569   }
570   case ARM::fixup_thumb_adr_pcrel_10:
571   case ARM::fixup_arm_thumb_cp:
572     // On CPUs supporting Thumb2, this will be relaxed to an ldr.w, otherwise we
573     // could have an error on our hands.
574     if (!STI->getFeatureBits()[ARM::FeatureThumb2] && IsResolved) {
575       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
576       if (FixupDiagnostic) {
577         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
578         return 0;
579       }
580     }
581     // Offset by 4, and don't encode the low two bits.
582     return ((Value - 4) >> 2) & 0xff;
583   case ARM::fixup_arm_thumb_cb: {
584     // CB instructions can only branch to offsets in [4, 126] in multiples of 2
585     // so ensure that the raw value LSB is zero and it lies in [2, 130].
586     // An offset of 2 will be relaxed to a NOP.
587     if ((int64_t)Value < 2 || Value > 0x82 || Value & 1) {
588       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
589       return 0;
590     }
591     // Offset by 4 and don't encode the lower bit, which is always 0.
592     // FIXME: diagnose if no Thumb2
593     uint32_t Binary = (Value - 4) >> 1;
594     return ((Binary & 0x20) << 4) | ((Binary & 0x1f) << 3);
595   }
596   case ARM::fixup_arm_thumb_br:
597     // Offset by 4 and don't encode the lower bit, which is always 0.
598     if (!STI->getFeatureBits()[ARM::FeatureThumb2] &&
599         !STI->getFeatureBits()[ARM::HasV8MBaselineOps]) {
600       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
601       if (FixupDiagnostic) {
602         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
603         return 0;
604       }
605     }
606     return ((Value - 4) >> 1) & 0x7ff;
607   case ARM::fixup_arm_thumb_bcc:
608     // Offset by 4 and don't encode the lower bit, which is always 0.
609     if (!STI->getFeatureBits()[ARM::FeatureThumb2]) {
610       const char *FixupDiagnostic = reasonForFixupRelaxation(Fixup, Value);
611       if (FixupDiagnostic) {
612         Ctx.reportError(Fixup.getLoc(), FixupDiagnostic);
613         return 0;
614       }
615     }
616     return ((Value - 4) >> 1) & 0xff;
617   case ARM::fixup_arm_pcrel_10_unscaled: {
618     Value = Value - 8; // ARM fixups offset by an additional word and don't
619                        // need to adjust for the half-word ordering.
620     bool isAdd = true;
621     if ((int64_t)Value < 0) {
622       Value = -Value;
623       isAdd = false;
624     }
625     // The value has the low 4 bits encoded in [3:0] and the high 4 in [11:8].
626     if (Value >= 256) {
627       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
628       return 0;
629     }
630     Value = (Value & 0xf) | ((Value & 0xf0) << 4);
631     return Value | (isAdd << 23);
632   }
633   case ARM::fixup_arm_pcrel_10:
634     Value = Value - 4; // ARM fixups offset by an additional word and don't
635                        // need to adjust for the half-word ordering.
636     LLVM_FALLTHROUGH;
637   case ARM::fixup_t2_pcrel_10: {
638     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
639     Value = Value - 4;
640     bool isAdd = true;
641     if ((int64_t)Value < 0) {
642       Value = -Value;
643       isAdd = false;
644     }
645     // These values don't encode the low two bits since they're always zero.
646     Value >>= 2;
647     if (Value >= 256) {
648       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
649       return 0;
650     }
651     Value |= isAdd << 23;
652 
653     // Same addressing mode as fixup_arm_pcrel_10, but with 16-bit halfwords
654     // swapped.
655     if (Kind == ARM::fixup_t2_pcrel_10)
656       return swapHalfWords(Value, IsLittleEndian);
657 
658     return Value;
659   }
660   case ARM::fixup_arm_pcrel_9:
661     Value = Value - 4; // ARM fixups offset by an additional word and don't
662                        // need to adjust for the half-word ordering.
663     LLVM_FALLTHROUGH;
664   case ARM::fixup_t2_pcrel_9: {
665     // Offset by 4, adjusted by two due to the half-word ordering of thumb.
666     Value = Value - 4;
667     bool isAdd = true;
668     if ((int64_t)Value < 0) {
669       Value = -Value;
670       isAdd = false;
671     }
672     // These values don't encode the low bit since it's always zero.
673     if (Value & 1) {
674       Ctx.reportError(Fixup.getLoc(), "invalid value for this fixup");
675       return 0;
676     }
677     Value >>= 1;
678     if (Value >= 256) {
679       Ctx.reportError(Fixup.getLoc(), "out of range pc-relative fixup value");
680       return 0;
681     }
682     Value |= isAdd << 23;
683 
684     // Same addressing mode as fixup_arm_pcrel_9, but with 16-bit halfwords
685     // swapped.
686     if (Kind == ARM::fixup_t2_pcrel_9)
687       return swapHalfWords(Value, IsLittleEndian);
688 
689     return Value;
690   }
691   case ARM::fixup_arm_mod_imm:
692     Value = ARM_AM::getSOImmVal(Value);
693     if (Value >> 12) {
694       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
695       return 0;
696     }
697     return Value;
698   case ARM::fixup_t2_so_imm: {
699     Value = ARM_AM::getT2SOImmVal(Value);
700     if ((int64_t)Value < 0) {
701       Ctx.reportError(Fixup.getLoc(), "out of range immediate fixup value");
702       return 0;
703     }
704     // Value will contain a 12-bit value broken up into a 4-bit shift in bits
705     // 11:8 and the 8-bit immediate in 0:7. The instruction has the immediate
706     // in 0:7. The 4-bit shift is split up into i:imm3 where i is placed at bit
707     // 10 of the upper half-word and imm3 is placed at 14:12 of the lower
708     // half-word.
709     uint64_t EncValue = 0;
710     EncValue |= (Value & 0x800) << 15;
711     EncValue |= (Value & 0x700) << 4;
712     EncValue |= (Value & 0xff);
713     return swapHalfWords(EncValue, IsLittleEndian);
714   }
715   }
716 }
717 
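// Inspect a fixup before it is finally evaluated: fold the Thumb bit into the
// value where MachO expects it, and force a relocation in the cases where the
// linker needs to see the target symbol (interworking, out-of-range BL).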
718 void ARMAsmBackend::processFixupValue(const MCAssembler &Asm,
719                                       const MCAsmLayout &Layout,
720                                       const MCFixup &Fixup,
721                                       const MCFragment *DF,
722                                       const MCValue &Target, uint64_t &Value,
723                                       bool &IsResolved) {
724   const MCSymbolRefExpr *A = Target.getSymA();
725   const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
  const unsigned FixupKind = Fixup.getKind();
727   // MachO (the only user of "Value") tries to make .o files that look vaguely
728   // pre-linked, so for MOVW/MOVT and .word relocations they put the Thumb bit
729   // into the addend if possible. Other relocation types don't want this bit
730   // though (branches couldn't encode it if it *was* present, and no other
731   // relocations exist) and it can interfere with checking valid expressions.
732   if (FixupKind == FK_Data_4 ||
733       FixupKind == ARM::fixup_arm_movw_lo16 ||
734       FixupKind == ARM::fixup_arm_movt_hi16 ||
735       FixupKind == ARM::fixup_t2_movw_lo16 ||
736       FixupKind == ARM::fixup_t2_movt_hi16) {
737     if (Sym) {
738       if (Asm.isThumbFunc(Sym))
739         Value |= 1;
740     }
741   }
742   if (IsResolved && (unsigned)Fixup.getKind() == ARM::fixup_arm_thumb_bl) {
743     assert(Sym && "How did we resolve this?");
744 
745     // If the symbol is external the linker will handle it.
746     // FIXME: Should we handle it as an optimization?
747 
748     // If the symbol is out of range, produce a relocation and hope the
749     // linker can handle it. GNU AS produces an error in this case.
750     if (Sym->isExternal() || Value >= 0x400004)
751       IsResolved = false;
752   }
753   // Create relocations for unconditional branches to function symbols with
754   // different execution mode in ELF binaries.
755   if (Sym && Sym->isELF()) {
    unsigned Type = cast<MCSymbolELF>(Sym)->getType();
757     if ((Type == ELF::STT_FUNC || Type == ELF::STT_GNU_IFUNC)) {
758       if (Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_uncondbranch))
759         IsResolved = false;
760       if (!Asm.isThumbFunc(Sym) && (FixupKind == ARM::fixup_arm_thumb_br ||
761                                     FixupKind == ARM::fixup_arm_thumb_bl ||
762                                     FixupKind == ARM::fixup_t2_uncondbranch))
763         IsResolved = false;
764     }
765   }
766   // We must always generate a relocation for BL/BLX instructions if we have
767   // a symbol to reference, as the linker relies on knowing the destination
768   // symbol's thumb-ness to get interworking right.
769   if (A && (FixupKind == ARM::fixup_arm_thumb_blx ||
770             FixupKind == ARM::fixup_arm_blx ||
771             FixupKind == ARM::fixup_arm_uncondbl ||
772             FixupKind == ARM::fixup_arm_condbl))
773     IsResolved = false;
774 }
775 
776 /// getFixupKindNumBytes - The number of bytes the fixup may change.
777 static unsigned getFixupKindNumBytes(unsigned Kind) {
778   switch (Kind) {
779   default:
780     llvm_unreachable("Unknown fixup kind!");
781 
782   case FK_Data_1:
783   case ARM::fixup_arm_thumb_bcc:
784   case ARM::fixup_arm_thumb_cp:
785   case ARM::fixup_thumb_adr_pcrel_10:
786     return 1;
787 
788   case FK_Data_2:
789   case ARM::fixup_arm_thumb_br:
790   case ARM::fixup_arm_thumb_cb:
791   case ARM::fixup_arm_mod_imm:
792     return 2;
793 
794   case ARM::fixup_arm_pcrel_10_unscaled:
795   case ARM::fixup_arm_ldst_pcrel_12:
796   case ARM::fixup_arm_pcrel_10:
797   case ARM::fixup_arm_pcrel_9:
798   case ARM::fixup_arm_adr_pcrel_12:
799   case ARM::fixup_arm_uncondbl:
800   case ARM::fixup_arm_condbl:
801   case ARM::fixup_arm_blx:
802   case ARM::fixup_arm_condbranch:
803   case ARM::fixup_arm_uncondbranch:
804     return 3;
805 
806   case FK_Data_4:
807   case ARM::fixup_t2_ldst_pcrel_12:
808   case ARM::fixup_t2_condbranch:
809   case ARM::fixup_t2_uncondbranch:
810   case ARM::fixup_t2_pcrel_10:
811   case ARM::fixup_t2_pcrel_9:
812   case ARM::fixup_t2_adr_pcrel_12:
813   case ARM::fixup_arm_thumb_bl:
814   case ARM::fixup_arm_thumb_blx:
815   case ARM::fixup_arm_movt_hi16:
816   case ARM::fixup_arm_movw_lo16:
817   case ARM::fixup_t2_movt_hi16:
818   case ARM::fixup_t2_movw_lo16:
819   case ARM::fixup_t2_so_imm:
820     return 4;
821 
822   case FK_SecRel_2:
823     return 2;
824   case FK_SecRel_4:
825     return 4;
826   }
827 }
828 
/// getFixupKindContainerSizeBytes - The number of bytes of the instruction or
/// data object containing the fixup, needed to address bytes correctly on
/// big-endian targets.
831 static unsigned getFixupKindContainerSizeBytes(unsigned Kind) {
832   switch (Kind) {
833   default:
834     llvm_unreachable("Unknown fixup kind!");
835 
836   case FK_Data_1:
837     return 1;
838   case FK_Data_2:
839     return 2;
840   case FK_Data_4:
841     return 4;
842 
843   case ARM::fixup_arm_thumb_bcc:
844   case ARM::fixup_arm_thumb_cp:
845   case ARM::fixup_thumb_adr_pcrel_10:
846   case ARM::fixup_arm_thumb_br:
847   case ARM::fixup_arm_thumb_cb:
848     // Instruction size is 2 bytes.
849     return 2;
850 
851   case ARM::fixup_arm_pcrel_10_unscaled:
852   case ARM::fixup_arm_ldst_pcrel_12:
853   case ARM::fixup_arm_pcrel_10:
854   case ARM::fixup_arm_adr_pcrel_12:
855   case ARM::fixup_arm_uncondbl:
856   case ARM::fixup_arm_condbl:
857   case ARM::fixup_arm_blx:
858   case ARM::fixup_arm_condbranch:
859   case ARM::fixup_arm_uncondbranch:
860   case ARM::fixup_t2_ldst_pcrel_12:
861   case ARM::fixup_t2_condbranch:
862   case ARM::fixup_t2_uncondbranch:
863   case ARM::fixup_t2_pcrel_10:
864   case ARM::fixup_t2_adr_pcrel_12:
865   case ARM::fixup_arm_thumb_bl:
866   case ARM::fixup_arm_thumb_blx:
867   case ARM::fixup_arm_movt_hi16:
868   case ARM::fixup_arm_movw_lo16:
869   case ARM::fixup_t2_movt_hi16:
870   case ARM::fixup_t2_movw_lo16:
871   case ARM::fixup_arm_mod_imm:
872   case ARM::fixup_t2_so_imm:
873     // Instruction size is 4 bytes.
874     return 4;
875   }
876 }
877 
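// OR the adjusted fixup value into the fragment bytes, honoring the target
// endianness and the size of the instruction containing the fixup.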
878 void ARMAsmBackend::applyFixup(const MCFixup &Fixup, char *Data,
879                                unsigned DataSize, uint64_t Value, bool IsPCRel,
880                                MCContext &Ctx) const {
881   unsigned NumBytes = getFixupKindNumBytes(Fixup.getKind());
882   Value = adjustFixupValue(Fixup, Value, IsPCRel, Ctx, IsLittleEndian, true);
883   if (!Value)
884     return; // Doesn't change encoding.
885 
886   unsigned Offset = Fixup.getOffset();
887   assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!");
888 
  // For big-endian targets, the size in bytes of the container holding the
  // fixup, so bytes can be indexed from its most-significant end.
890   unsigned FullSizeBytes;
891   if (!IsLittleEndian) {
892     FullSizeBytes = getFixupKindContainerSizeBytes(Fixup.getKind());
893     assert((Offset + FullSizeBytes) <= DataSize && "Invalid fixup size!");
894     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
895   }
896 
897   // For each byte of the fragment that the fixup touches, mask in the bits from
898   // the fixup value. The Value has been "split up" into the appropriate
899   // bitfields above.
900   for (unsigned i = 0; i != NumBytes; ++i) {
901     unsigned Idx = IsLittleEndian ? i : (FullSizeBytes - 1 - i);
902     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
903   }
904 }
905 
906 namespace CU {
907 
908 /// \brief Compact unwind encoding values.
909 enum CompactUnwindEncodings {
910   UNWIND_ARM_MODE_MASK                         = 0x0F000000,
911   UNWIND_ARM_MODE_FRAME                        = 0x01000000,
912   UNWIND_ARM_MODE_FRAME_D                      = 0x02000000,
913   UNWIND_ARM_MODE_DWARF                        = 0x04000000,
914 
915   UNWIND_ARM_FRAME_STACK_ADJUST_MASK           = 0x00C00000,
916 
917   UNWIND_ARM_FRAME_FIRST_PUSH_R4               = 0x00000001,
918   UNWIND_ARM_FRAME_FIRST_PUSH_R5               = 0x00000002,
919   UNWIND_ARM_FRAME_FIRST_PUSH_R6               = 0x00000004,
920 
921   UNWIND_ARM_FRAME_SECOND_PUSH_R8              = 0x00000008,
922   UNWIND_ARM_FRAME_SECOND_PUSH_R9              = 0x00000010,
923   UNWIND_ARM_FRAME_SECOND_PUSH_R10             = 0x00000020,
924   UNWIND_ARM_FRAME_SECOND_PUSH_R11             = 0x00000040,
925   UNWIND_ARM_FRAME_SECOND_PUSH_R12             = 0x00000080,
926 
927   UNWIND_ARM_FRAME_D_REG_COUNT_MASK            = 0x00000F00,
928 
929   UNWIND_ARM_DWARF_SECTION_OFFSET              = 0x00FFFFFF
930 };
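// Taken together, a FRAME-mode encoding packs, from high bits to low: the
// 4-bit mode (bits 24-27), a 2-bit stack adjustment in 4-byte units (bits
// 22-23), the number of saved D-registers minus one (bits 8-11, FRAME_D
// only), and a bitmask of the callee-saved GPRs pushed below the r7/lr frame
// (bits 0-7).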
931 
932 } // end CU namespace
933 
934 /// Generate compact unwind encoding for the function based on the CFI
935 /// instructions. If the CFI instructions describe a frame that cannot be
/// encoded in compact unwind, the method returns UNWIND_ARM_MODE_DWARF, which
/// tells the runtime to fall back and unwind using DWARF.
938 uint32_t ARMAsmBackendDarwin::generateCompactUnwindEncoding(
939     ArrayRef<MCCFIInstruction> Instrs) const {
940   DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "generateCU()\n");
941   // Only armv7k uses CFI based unwinding.
942   if (Subtype != MachO::CPU_SUBTYPE_ARM_V7K)
943     return 0;
944   // No .cfi directives means no frame.
945   if (Instrs.empty())
946     return 0;
947   // Start off assuming CFA is at SP+0.
948   int CFARegister = ARM::SP;
949   int CFARegisterOffset = 0;
950   // Mark savable registers as initially unsaved
951   DenseMap<unsigned, int> RegOffsets;
952   int FloatRegCount = 0;
953   // Process each .cfi directive and build up compact unwind info.
954   for (size_t i = 0, e = Instrs.size(); i != e; ++i) {
955     int Reg;
956     const MCCFIInstruction &Inst = Instrs[i];
957     switch (Inst.getOperation()) {
958     case MCCFIInstruction::OpDefCfa: // DW_CFA_def_cfa
959       CFARegisterOffset = -Inst.getOffset();
960       CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
961       break;
962     case MCCFIInstruction::OpDefCfaOffset: // DW_CFA_def_cfa_offset
963       CFARegisterOffset = -Inst.getOffset();
964       break;
965     case MCCFIInstruction::OpDefCfaRegister: // DW_CFA_def_cfa_register
966       CFARegister = MRI.getLLVMRegNum(Inst.getRegister(), true);
967       break;
968     case MCCFIInstruction::OpOffset: // DW_CFA_offset
969       Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
970       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
971         RegOffsets[Reg] = Inst.getOffset();
972       else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
973         RegOffsets[Reg] = Inst.getOffset();
974         ++FloatRegCount;
975       } else {
976         DEBUG_WITH_TYPE("compact-unwind",
977                         llvm::dbgs() << ".cfi_offset on unknown register="
978                                      << Inst.getRegister() << "\n");
979         return CU::UNWIND_ARM_MODE_DWARF;
980       }
981       break;
982     case MCCFIInstruction::OpRelOffset: // DW_CFA_advance_loc
983       // Ignore
984       break;
985     default:
      // Directive not convertible to compact unwind, bail out.
987       DEBUG_WITH_TYPE("compact-unwind",
988                       llvm::dbgs()
                          << "CFI directive not compatible with compact "
990                              "unwind encoding, opcode=" << Inst.getOperation()
991                           << "\n");
992       return CU::UNWIND_ARM_MODE_DWARF;
993       break;
994     }
995   }
996 
997   // If no frame set up, return no unwind info.
998   if ((CFARegister == ARM::SP) && (CFARegisterOffset == 0))
999     return 0;
1000 
1001   // Verify standard frame (lr/r7) was used.
1002   if (CFARegister != ARM::R7) {
1003     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs() << "frame register is "
1004                                                    << CFARegister
1005                                                    << " instead of r7\n");
1006     return CU::UNWIND_ARM_MODE_DWARF;
1007   }
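  // The CFA sits CFARegisterOffset bytes above r7; 8 of those bytes are the
  // pushed r7/lr pair, and anything extra is a stack adjustment (typically
  // space reserved for var-args) that must match one of the encodable
  // amounts below.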
1008   int StackAdjust = CFARegisterOffset - 8;
1009   if (RegOffsets.lookup(ARM::LR) != (-4 - StackAdjust)) {
1010     DEBUG_WITH_TYPE("compact-unwind",
1011                     llvm::dbgs()
1012                         << "LR not saved as standard frame, StackAdjust="
1013                         << StackAdjust
1014                         << ", CFARegisterOffset=" << CFARegisterOffset
1015                         << ", lr save at offset=" << RegOffsets[14] << "\n");
1016     return CU::UNWIND_ARM_MODE_DWARF;
1017   }
1018   if (RegOffsets.lookup(ARM::R7) != (-8 - StackAdjust)) {
1019     DEBUG_WITH_TYPE("compact-unwind",
1020                     llvm::dbgs() << "r7 not saved as standard frame\n");
1021     return CU::UNWIND_ARM_MODE_DWARF;
1022   }
1023   uint32_t CompactUnwindEncoding = CU::UNWIND_ARM_MODE_FRAME;
1024 
1025   // If var-args are used, there may be a stack adjust required.
1026   switch (StackAdjust) {
1027   case 0:
1028     break;
1029   case 4:
1030     CompactUnwindEncoding |= 0x00400000;
1031     break;
1032   case 8:
1033     CompactUnwindEncoding |= 0x00800000;
1034     break;
1035   case 12:
1036     CompactUnwindEncoding |= 0x00C00000;
1037     break;
1038   default:
1039     DEBUG_WITH_TYPE("compact-unwind", llvm::dbgs()
1040                                           << ".cfi_def_cfa stack adjust ("
1041                                           << StackAdjust << ") out of range\n");
1042     return CU::UNWIND_ARM_MODE_DWARF;
1043   }
1044 
1045   // If r6 is saved, it must be right below r7.
1046   static struct {
1047     unsigned Reg;
1048     unsigned Encoding;
1049   } GPRCSRegs[] = {{ARM::R6, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R6},
1050                    {ARM::R5, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R5},
1051                    {ARM::R4, CU::UNWIND_ARM_FRAME_FIRST_PUSH_R4},
1052                    {ARM::R12, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R12},
1053                    {ARM::R11, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R11},
1054                    {ARM::R10, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R10},
1055                    {ARM::R9, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R9},
1056                    {ARM::R8, CU::UNWIND_ARM_FRAME_SECOND_PUSH_R8}};
1057 
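  // Walk the candidate callee-saved GPRs in push order; every register that
  // was actually saved must sit exactly 4 bytes below the previously matched
  // save, otherwise the layout cannot be described compactly.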
1058   int CurOffset = -8 - StackAdjust;
1059   for (auto CSReg : GPRCSRegs) {
1060     auto Offset = RegOffsets.find(CSReg.Reg);
1061     if (Offset == RegOffsets.end())
1062       continue;
1063 
1064     int RegOffset = Offset->second;
1065     if (RegOffset != CurOffset - 4) {
1066       DEBUG_WITH_TYPE("compact-unwind",
1067                       llvm::dbgs() << MRI.getName(CSReg.Reg) << " saved at "
1068                                    << RegOffset << " but only supported at "
1069                                    << CurOffset << "\n");
1070       return CU::UNWIND_ARM_MODE_DWARF;
1071     }
1072     CompactUnwindEncoding |= CSReg.Encoding;
1073     CurOffset -= 4;
1074   }
1075 
1076   // If no floats saved, we are done.
1077   if (FloatRegCount == 0)
1078     return CompactUnwindEncoding;
1079 
1080   // Switch mode to include D register saving.
1081   CompactUnwindEncoding &= ~CU::UNWIND_ARM_MODE_MASK;
1082   CompactUnwindEncoding |= CU::UNWIND_ARM_MODE_FRAME_D;
1083 
1084   // FIXME: supporting more than 4 saved D-registers compactly would be trivial,
1085   // but needs coordination with the linker and libunwind.
1086   if (FloatRegCount > 4) {
1087     DEBUG_WITH_TYPE("compact-unwind",
1088                     llvm::dbgs() << "unsupported number of D registers saved ("
1089                                  << FloatRegCount << ")\n");
    return CU::UNWIND_ARM_MODE_DWARF;
1091   }
1092 
1093   // Floating point registers must either be saved sequentially, or we defer to
1094   // DWARF. No gaps allowed here so check that each saved d-register is
1095   // precisely where it should be.
1096   static unsigned FPRCSRegs[] = { ARM::D8, ARM::D10, ARM::D12, ARM::D14 };
1097   for (int Idx = FloatRegCount - 1; Idx >= 0; --Idx) {
1098     auto Offset = RegOffsets.find(FPRCSRegs[Idx]);
1099     if (Offset == RegOffsets.end()) {
1100       DEBUG_WITH_TYPE("compact-unwind",
1101                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1102                                    << MRI.getName(FPRCSRegs[Idx])
1103                                    << " not saved\n");
1104       return CU::UNWIND_ARM_MODE_DWARF;
1105     } else if (Offset->second != CurOffset - 8) {
1106       DEBUG_WITH_TYPE("compact-unwind",
1107                       llvm::dbgs() << FloatRegCount << " D-regs saved, but "
1108                                    << MRI.getName(FPRCSRegs[Idx])
1109                                    << " saved at " << Offset->second
1110                                    << ", expected at " << CurOffset - 8
1111                                    << "\n");
1112       return CU::UNWIND_ARM_MODE_DWARF;
1113     }
1114     CurOffset -= 8;
1115   }
1116 
1117   return CompactUnwindEncoding | ((FloatRegCount - 1) << 8);
1118 }
1119 
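// Map a textual architecture name (e.g. "armv7k") to the corresponding Mach-O
// CPU subtype; unrecognized or newer architectures default to plain v7.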
1120 static MachO::CPUSubTypeARM getMachOSubTypeFromArch(StringRef Arch) {
1121   unsigned AK = ARM::parseArch(Arch);
1122   switch (AK) {
1123   default:
1124     return MachO::CPU_SUBTYPE_ARM_V7;
1125   case ARM::AK_ARMV4T:
1126     return MachO::CPU_SUBTYPE_ARM_V4T;
1127   case ARM::AK_ARMV5T:
1128   case ARM::AK_ARMV5TE:
1129   case ARM::AK_ARMV5TEJ:
1130     return MachO::CPU_SUBTYPE_ARM_V5;
1131   case ARM::AK_ARMV6:
1132   case ARM::AK_ARMV6K:
1133     return MachO::CPU_SUBTYPE_ARM_V6;
1134   case ARM::AK_ARMV7A:
1135     return MachO::CPU_SUBTYPE_ARM_V7;
1136   case ARM::AK_ARMV7S:
1137     return MachO::CPU_SUBTYPE_ARM_V7S;
1138   case ARM::AK_ARMV7K:
1139     return MachO::CPU_SUBTYPE_ARM_V7K;
1140   case ARM::AK_ARMV6M:
1141     return MachO::CPU_SUBTYPE_ARM_V6M;
1142   case ARM::AK_ARMV7M:
1143     return MachO::CPU_SUBTYPE_ARM_V7M;
1144   case ARM::AK_ARMV7EM:
1145     return MachO::CPU_SUBTYPE_ARM_V7EM;
1146   }
1147 }
1148 
1149 MCAsmBackend *llvm::createARMAsmBackend(const Target &T,
1150                                         const MCRegisterInfo &MRI,
1151                                         const Triple &TheTriple, StringRef CPU,
1152                                         const MCTargetOptions &Options,
1153                                         bool isLittle) {
1154   switch (TheTriple.getObjectFormat()) {
1155   default:
1156     llvm_unreachable("unsupported object format");
1157   case Triple::MachO: {
1158     MachO::CPUSubTypeARM CS = getMachOSubTypeFromArch(TheTriple.getArchName());
1159     return new ARMAsmBackendDarwin(T, TheTriple, MRI, CS);
1160   }
1161   case Triple::COFF:
1162     assert(TheTriple.isOSWindows() && "non-Windows ARM COFF is not supported");
1163     return new ARMAsmBackendWinCOFF(T, TheTriple);
1164   case Triple::ELF:
1165     assert(TheTriple.isOSBinFormatELF() && "using ELF for non-ELF target");
1166     uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1167     return new ARMAsmBackendELF(T, TheTriple, OSABI, isLittle);
1168   }
1169 }
1170 
1171 MCAsmBackend *llvm::createARMLEAsmBackend(const Target &T,
1172                                           const MCRegisterInfo &MRI,
1173                                           const Triple &TT, StringRef CPU,
1174                                           const MCTargetOptions &Options) {
1175   return createARMAsmBackend(T, MRI, TT, CPU, Options, true);
1176 }
1177 
1178 MCAsmBackend *llvm::createARMBEAsmBackend(const Target &T,
1179                                           const MCRegisterInfo &MRI,
1180                                           const Triple &TT, StringRef CPU,
1181                                           const MCTargetOptions &Options) {
1182   return createARMAsmBackend(T, MRI, TT, CPU, Options, false);
1183 }
1184 
1185 MCAsmBackend *llvm::createThumbLEAsmBackend(const Target &T,
1186                                             const MCRegisterInfo &MRI,
1187                                             const Triple &TT, StringRef CPU,
1188                                             const MCTargetOptions &Options) {
1189   return createARMAsmBackend(T, MRI, TT, CPU, Options, true);
1190 }
1191 
1192 MCAsmBackend *llvm::createThumbBEAsmBackend(const Target &T,
1193                                             const MCRegisterInfo &MRI,
1194                                             const Triple &TT, StringRef CPU,
1195                                             const MCTargetOptions &Options) {
1196   return createARMAsmBackend(T, MRI, TT, CPU, Options, false);
1197 }
1198