1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "ARMFeatures.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMMCExpr.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/SmallVector.h"
16 #include "llvm/ADT/StringExtras.h"
17 #include "llvm/ADT/StringSwitch.h"
18 #include "llvm/ADT/Triple.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCAsmInfo.h"
21 #include "llvm/MC/MCAssembler.h"
22 #include "llvm/MC/MCContext.h"
23 #include "llvm/MC/MCDisassembler/MCDisassembler.h"
24 #include "llvm/MC/MCELFStreamer.h"
25 #include "llvm/MC/MCExpr.h"
26 #include "llvm/MC/MCInst.h"
27 #include "llvm/MC/MCInstrDesc.h"
28 #include "llvm/MC/MCInstrInfo.h"
29 #include "llvm/MC/MCObjectFileInfo.h"
30 #include "llvm/MC/MCParser/MCAsmLexer.h"
31 #include "llvm/MC/MCParser/MCAsmParser.h"
32 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
33 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
34 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
35 #include "llvm/MC/MCRegisterInfo.h"
36 #include "llvm/MC/MCSection.h"
37 #include "llvm/MC/MCStreamer.h"
38 #include "llvm/MC/MCSubtargetInfo.h"
39 #include "llvm/MC/MCSymbol.h"
40 #include "llvm/Support/ARMBuildAttributes.h"
41 #include "llvm/Support/ARMEHABI.h"
42 #include "llvm/Support/COFF.h"
43 #include "llvm/Support/CommandLine.h"
44 #include "llvm/Support/Debug.h"
45 #include "llvm/Support/ELF.h"
46 #include "llvm/Support/MathExtras.h"
47 #include "llvm/Support/SourceMgr.h"
48 #include "llvm/Support/TargetParser.h"
49 #include "llvm/Support/TargetRegistry.h"
50 #include "llvm/Support/raw_ostream.h"
51 
52 using namespace llvm;
53 
54 namespace {
55 
56 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
57 
58 static cl::opt<ImplicitItModeTy> ImplicitItMode(
59     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
    cl::desc("Allow conditional instructions outside of an IT block"),
61     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
62                           "Accept in both ISAs, emit implicit ITs in Thumb"),
63                clEnumValN(ImplicitItModeTy::Never, "never",
64                           "Warn in ARM, reject in Thumb"),
65                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
66                           "Accept in ARM, reject in Thumb"),
67                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
68                           "Warn in ARM, emit implicit ITs in Thumb")));
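// Usage sketch (illustrative; assumes a tool that links in the ARM target and
// the standard cl::opt machinery picks up the option above), e.g.:
//   llvm-mc -triple=thumbv7 -arm-implicit-it=always input.s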
69 
70 class ARMOperand;
71 
72 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
73 
74 class UnwindContext {
75   MCAsmParser &Parser;
76 
77   typedef SmallVector<SMLoc, 4> Locs;
78 
79   Locs FnStartLocs;
80   Locs CantUnwindLocs;
81   Locs PersonalityLocs;
82   Locs PersonalityIndexLocs;
83   Locs HandlerDataLocs;
84   int FPReg;
85 
86 public:
87   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
88 
89   bool hasFnStart() const { return !FnStartLocs.empty(); }
90   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
91   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
92   bool hasPersonality() const {
93     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
94   }
95 
96   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
97   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
98   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
99   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
100   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
101 
102   void saveFPReg(int Reg) { FPReg = Reg; }
103   int getFPReg() const { return FPReg; }
104 
105   void emitFnStartLocNotes() const {
106     for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
107          FI != FE; ++FI)
108       Parser.Note(*FI, ".fnstart was specified here");
109   }
110   void emitCantUnwindLocNotes() const {
111     for (Locs::const_iterator UI = CantUnwindLocs.begin(),
112                               UE = CantUnwindLocs.end(); UI != UE; ++UI)
113       Parser.Note(*UI, ".cantunwind was specified here");
114   }
115   void emitHandlerDataLocNotes() const {
116     for (Locs::const_iterator HI = HandlerDataLocs.begin(),
117                               HE = HandlerDataLocs.end(); HI != HE; ++HI)
118       Parser.Note(*HI, ".handlerdata was specified here");
119   }
120   void emitPersonalityLocNotes() const {
121     for (Locs::const_iterator PI = PersonalityLocs.begin(),
122                               PE = PersonalityLocs.end(),
123                               PII = PersonalityIndexLocs.begin(),
124                               PIE = PersonalityIndexLocs.end();
125          PI != PE || PII != PIE;) {
126       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
127         Parser.Note(*PI++, ".personality was specified here");
128       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
129         Parser.Note(*PII++, ".personalityindex was specified here");
130       else
131         llvm_unreachable(".personality and .personalityindex cannot be "
132                          "at the same location");
133     }
134   }
135 
136   void reset() {
137     FnStartLocs = Locs();
138     CantUnwindLocs = Locs();
139     PersonalityLocs = Locs();
140     HandlerDataLocs = Locs();
141     PersonalityIndexLocs = Locs();
142     FPReg = ARM::SP;
143   }
144 };
145 
146 class ARMAsmParser : public MCTargetAsmParser {
147   const MCInstrInfo &MII;
148   const MCRegisterInfo *MRI;
149   UnwindContext UC;
150 
151   ARMTargetStreamer &getTargetStreamer() {
152     assert(getParser().getStreamer().getTargetStreamer() &&
153            "do not have a target streamer");
154     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
155     return static_cast<ARMTargetStreamer &>(TS);
156   }
157 
  // Map of register aliases registered via the .req directive.
159   StringMap<unsigned> RegisterReqs;
160 
161   bool NextSymbolIsThumb;
162 
163   bool useImplicitITThumb() const {
164     return ImplicitItMode == ImplicitItModeTy::Always ||
165            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
166   }
167 
168   bool useImplicitITARM() const {
169     return ImplicitItMode == ImplicitItModeTy::Always ||
170            ImplicitItMode == ImplicitItModeTy::ARMOnly;
171   }
172 
173   struct {
174     ARMCC::CondCodes Cond;    // Condition for IT block.
175     unsigned Mask:4;          // Condition mask for instructions.
176                               // Starting at first 1 (from lsb).
177                               //   '1'  condition as indicated in IT.
178                               //   '0'  inverse of condition (else).
179                               // Count of instructions in IT block is
180                               // 4 - trailingzeroes(mask)
181                               // Note that this does not have the same encoding
182                               // as in the IT instruction, which also depends
183                               // on the low bit of the condition code.
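                              // Worked example (illustrative): "ITET EQ" is
                              // held here as 0b0110: bit 3 is the second slot
                              // ('e' -> 0), bit 2 the third slot ('t' -> 1),
                              // and bit 1 the terminating 1, giving
                              // 4 - trailingzeroes(0b0110) == 3 instructions.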
184 
185     unsigned CurPosition;     // Current position in parsing of IT
186                               // block. In range [0,4], with 0 being the IT
187                               // instruction itself. Initialized according to
188                               // count of instructions in block.  ~0U if no
189                               // active IT block.
190 
191     bool IsExplicit;          // true  - The IT instruction was present in the
192                               //         input, we should not modify it.
193                               // false - The IT instruction was added
194                               //         implicitly, we can extend it if that
195                               //         would be legal.
196   } ITState;
197 
198   llvm::SmallVector<MCInst, 4> PendingConditionalInsts;
199 
200   void flushPendingInstructions(MCStreamer &Out) override {
201     if (!inImplicitITBlock()) {
202       assert(PendingConditionalInsts.size() == 0);
203       return;
204     }
205 
206     // Emit the IT instruction
207     unsigned Mask = getITMaskEncoding();
208     MCInst ITInst;
209     ITInst.setOpcode(ARM::t2IT);
210     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
211     ITInst.addOperand(MCOperand::createImm(Mask));
212     Out.EmitInstruction(ITInst, getSTI());
213 
    // Emit the conditional instructions
215     assert(PendingConditionalInsts.size() <= 4);
216     for (const MCInst &Inst : PendingConditionalInsts) {
217       Out.EmitInstruction(Inst, getSTI());
218     }
219     PendingConditionalInsts.clear();
220 
221     // Clear the IT state
222     ITState.Mask = 0;
223     ITState.CurPosition = ~0U;
224   }
225 
226   bool inITBlock() { return ITState.CurPosition != ~0U; }
227   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
228   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
229   bool lastInITBlock() {
230     return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
231   }
232   void forwardITPosition() {
233     if (!inITBlock()) return;
234     // Move to the next instruction in the IT block, if there is one. If not,
235     // mark the block as done, except for implicit IT blocks, which we leave
236     // open until we find an instruction that can't be added to it.
237     unsigned TZ = countTrailingZeros(ITState.Mask);
238     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
239       ITState.CurPosition = ~0U; // Done with the IT block after this.
240   }
241 
242   // Rewind the state of the current IT block, removing the last slot from it.
243   void rewindImplicitITPosition() {
244     assert(inImplicitITBlock());
245     assert(ITState.CurPosition > 1);
246     ITState.CurPosition--;
247     unsigned TZ = countTrailingZeros(ITState.Mask);
248     unsigned NewMask = 0;
249     NewMask |= ITState.Mask & (0xC << TZ);
250     NewMask |= 0x2 << TZ;
251     ITState.Mask = NewMask;
252   }
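  // For example (sketch): rewindImplicitITPosition() on a three-slot block
  // with mask 0b1110 keeps the second-slot bit and moves the terminating 1 up
  // one position, yielding the two-slot mask 0b1100.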
253 
  // Discard the current implicit IT block. This is only valid when we are
  // still at the first slot of the block, and it closes the IT block.
256   void discardImplicitITBlock() {
257     assert(inImplicitITBlock());
258     assert(ITState.CurPosition == 1);
259     ITState.CurPosition = ~0U;
260     return;
261   }
262 
263   // Get the encoding of the IT mask, as it will appear in an IT instruction.
264   unsigned getITMaskEncoding() {
265     assert(inITBlock());
266     unsigned Mask = ITState.Mask;
267     unsigned TZ = countTrailingZeros(Mask);
268     if ((ITState.Cond & 1) == 0) {
269       assert(Mask && TZ <= 3 && "illegal IT mask value!");
270       Mask ^= (0xE << TZ) & 0xF;
271     }
272     return Mask;
273   }
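  // For example (sketch): getITMaskEncoding() turns the parsed mask 0b0110 of
  // "ITET EQ" into 0b1010, since EQ has an even condition code and the bits
  // above the terminating 1 are inverted.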
274 
275   // Get the condition code corresponding to the current IT block slot.
276   ARMCC::CondCodes currentITCond() {
277     unsigned MaskBit;
278     if (ITState.CurPosition == 1)
279       MaskBit = 1;
280     else
281       MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
282 
283     return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
284   }
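  // For example (sketch): with Cond == EQ and mask 0b0110 ("ITET EQ"),
  // currentITCond() yields EQ, NE and EQ for positions 1, 2 and 3.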
285 
286   // Invert the condition of the current IT block slot without changing any
287   // other slots in the same block.
288   void invertCurrentITCondition() {
289     if (ITState.CurPosition == 1) {
290       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
291     } else {
292       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
293     }
294   }
295 
296   // Returns true if the current IT block is full (all 4 slots used).
297   bool isITBlockFull() {
298     return inITBlock() && (ITState.Mask & 1);
299   }
300 
301   // Extend the current implicit IT block to have one more slot with the given
302   // condition code.
303   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
304     assert(inImplicitITBlock());
305     assert(!isITBlockFull());
306     assert(Cond == ITState.Cond ||
307            Cond == ARMCC::getOppositeCondition(ITState.Cond));
308     unsigned TZ = countTrailingZeros(ITState.Mask);
309     unsigned NewMask = 0;
310     // Keep any existing condition bits.
311     NewMask |= ITState.Mask & (0xE << TZ);
312     // Insert the new condition bit.
313     NewMask |= (Cond == ITState.Cond) << TZ;
314     // Move the trailing 1 down one bit.
315     NewMask |= 1 << (TZ - 1);
316     ITState.Mask = NewMask;
317   }
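  // For example (sketch): extendImplicitITBlock() on a single-slot block
  // (mask 0b1000) yields 0b1100 for the same condition and 0b0100 for the
  // opposite condition.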
318 
319   // Create a new implicit IT block with a dummy condition code.
320   void startImplicitITBlock() {
321     assert(!inITBlock());
322     ITState.Cond = ARMCC::AL;
323     ITState.Mask = 8;
324     ITState.CurPosition = 1;
325     ITState.IsExplicit = false;
326     return;
327   }
328 
329   // Create a new explicit IT block with the given condition and mask. The mask
330   // should be in the parsed format, with a 1 implying 't', regardless of the
331   // low bit of the condition.
332   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
333     assert(!inITBlock());
334     ITState.Cond = Cond;
335     ITState.Mask = Mask;
336     ITState.CurPosition = 0;
337     ITState.IsExplicit = true;
338     return;
339   }
340 
341   void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
342     return getParser().Note(L, Msg, Range);
343   }
344   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
345     return getParser().Warning(L, Msg, Range);
346   }
347   bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
348     return getParser().Error(L, Msg, Range);
349   }
350 
351   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
352                            unsigned ListNo, bool IsARPop = false);
353   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
354                            unsigned ListNo);
355 
356   int tryParseRegister();
357   bool tryParseRegisterWithWriteBack(OperandVector &);
358   int tryParseShiftRegister(OperandVector &);
359   bool parseRegisterList(OperandVector &);
360   bool parseMemory(OperandVector &);
361   bool parseOperand(OperandVector &, StringRef Mnemonic);
362   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
363   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
364                               unsigned &ShiftAmount);
365   bool parseLiteralValues(unsigned Size, SMLoc L);
366   bool parseDirectiveThumb(SMLoc L);
367   bool parseDirectiveARM(SMLoc L);
368   bool parseDirectiveThumbFunc(SMLoc L);
369   bool parseDirectiveCode(SMLoc L);
370   bool parseDirectiveSyntax(SMLoc L);
371   bool parseDirectiveReq(StringRef Name, SMLoc L);
372   bool parseDirectiveUnreq(SMLoc L);
373   bool parseDirectiveArch(SMLoc L);
374   bool parseDirectiveEabiAttr(SMLoc L);
375   bool parseDirectiveCPU(SMLoc L);
376   bool parseDirectiveFPU(SMLoc L);
377   bool parseDirectiveFnStart(SMLoc L);
378   bool parseDirectiveFnEnd(SMLoc L);
379   bool parseDirectiveCantUnwind(SMLoc L);
380   bool parseDirectivePersonality(SMLoc L);
381   bool parseDirectiveHandlerData(SMLoc L);
382   bool parseDirectiveSetFP(SMLoc L);
383   bool parseDirectivePad(SMLoc L);
384   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
385   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
386   bool parseDirectiveLtorg(SMLoc L);
387   bool parseDirectiveEven(SMLoc L);
388   bool parseDirectivePersonalityIndex(SMLoc L);
389   bool parseDirectiveUnwindRaw(SMLoc L);
390   bool parseDirectiveTLSDescSeq(SMLoc L);
391   bool parseDirectiveMovSP(SMLoc L);
392   bool parseDirectiveObjectArch(SMLoc L);
393   bool parseDirectiveArchExtension(SMLoc L);
394   bool parseDirectiveAlign(SMLoc L);
395   bool parseDirectiveThumbSet(SMLoc L);
396 
397   StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
398                           bool &CarrySetting, unsigned &ProcessorIMod,
399                           StringRef &ITMask);
400   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
401                              bool &CanAcceptCarrySet,
402                              bool &CanAcceptPredicationCode);
403 
404   void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
405                                      OperandVector &Operands);
406   bool isThumb() const {
407     // FIXME: Can tablegen auto-generate this?
408     return getSTI().getFeatureBits()[ARM::ModeThumb];
409   }
410   bool isThumbOne() const {
411     return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
412   }
413   bool isThumbTwo() const {
414     return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
415   }
416   bool hasThumb() const {
417     return getSTI().getFeatureBits()[ARM::HasV4TOps];
418   }
419   bool hasThumb2() const {
420     return getSTI().getFeatureBits()[ARM::FeatureThumb2];
421   }
422   bool hasV6Ops() const {
423     return getSTI().getFeatureBits()[ARM::HasV6Ops];
424   }
425   bool hasV6T2Ops() const {
426     return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
427   }
428   bool hasV6MOps() const {
429     return getSTI().getFeatureBits()[ARM::HasV6MOps];
430   }
431   bool hasV7Ops() const {
432     return getSTI().getFeatureBits()[ARM::HasV7Ops];
433   }
434   bool hasV8Ops() const {
435     return getSTI().getFeatureBits()[ARM::HasV8Ops];
436   }
437   bool hasV8MBaseline() const {
438     return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
439   }
440   bool hasV8MMainline() const {
441     return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
442   }
443   bool has8MSecExt() const {
444     return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
445   }
446   bool hasARM() const {
447     return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
448   }
449   bool hasDSP() const {
450     return getSTI().getFeatureBits()[ARM::FeatureDSP];
451   }
452   bool hasD16() const {
453     return getSTI().getFeatureBits()[ARM::FeatureD16];
454   }
455   bool hasV8_1aOps() const {
456     return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
457   }
458   bool hasRAS() const {
459     return getSTI().getFeatureBits()[ARM::FeatureRAS];
460   }
461 
462   void SwitchMode() {
463     MCSubtargetInfo &STI = copySTI();
464     uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
465     setAvailableFeatures(FB);
466   }
467   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
468   bool isMClass() const {
469     return getSTI().getFeatureBits()[ARM::FeatureMClass];
470   }
471 
472   /// @name Auto-generated Match Functions
473   /// {
474 
475 #define GET_ASSEMBLER_HEADER
476 #include "ARMGenAsmMatcher.inc"
477 
478   /// }
479 
480   OperandMatchResultTy parseITCondCode(OperandVector &);
481   OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
482   OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
483   OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
484   OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
485   OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
486   OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
487   OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
488   OperandMatchResultTy parseBankedRegOperand(OperandVector &);
489   OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
490                                    int High);
491   OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
492     return parsePKHImm(O, "lsl", 0, 31);
493   }
494   OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
495     return parsePKHImm(O, "asr", 1, 32);
496   }
497   OperandMatchResultTy parseSetEndImm(OperandVector &);
498   OperandMatchResultTy parseShifterImm(OperandVector &);
499   OperandMatchResultTy parseRotImm(OperandVector &);
500   OperandMatchResultTy parseModImm(OperandVector &);
501   OperandMatchResultTy parseBitfield(OperandVector &);
502   OperandMatchResultTy parsePostIdxReg(OperandVector &);
503   OperandMatchResultTy parseAM3Offset(OperandVector &);
504   OperandMatchResultTy parseFPImm(OperandVector &);
505   OperandMatchResultTy parseVectorList(OperandVector &);
506   OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
507                                        SMLoc &EndLoc);
508 
509   // Asm Match Converter Methods
510   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
511   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
512 
513   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
                          MCStreamer &Out);
515   bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
516   bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
517   bool isITBlockTerminator(MCInst &Inst) const;
518 
519 public:
520   enum ARMMatchResultTy {
521     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
522     Match_RequiresNotITBlock,
523     Match_RequiresV6,
524     Match_RequiresThumb2,
525     Match_RequiresV8,
526 #define GET_OPERAND_DIAGNOSTIC_TYPES
527 #include "ARMGenAsmMatcher.inc"
528 
529   };
530 
531   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
532                const MCInstrInfo &MII, const MCTargetOptions &Options)
533     : MCTargetAsmParser(Options, STI), MII(MII), UC(Parser) {
534     MCAsmParserExtension::Initialize(Parser);
535 
536     // Cache the MCRegisterInfo.
537     MRI = getContext().getRegisterInfo();
538 
539     // Initialize the set of available features.
540     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
541 
542     // Not in an ITBlock to start with.
543     ITState.CurPosition = ~0U;
544 
545     NextSymbolIsThumb = false;
546   }
547 
548   // Implementation of the MCTargetAsmParser interface:
549   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
550   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
551                         SMLoc NameLoc, OperandVector &Operands) override;
552   bool ParseDirective(AsmToken DirectiveID) override;
553 
554   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
555                                       unsigned Kind) override;
556   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
557 
558   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
559                                OperandVector &Operands, MCStreamer &Out,
560                                uint64_t &ErrorInfo,
561                                bool MatchingInlineAsm) override;
562   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
563                             uint64_t &ErrorInfo, bool MatchingInlineAsm,
564                             bool &EmitInITBlock, MCStreamer &Out);
565   void onLabelParsed(MCSymbol *Symbol) override;
566 };
567 } // end anonymous namespace
568 
569 namespace {
570 
571 /// ARMOperand - Instances of this class represent a parsed ARM machine
572 /// operand.
573 class ARMOperand : public MCParsedAsmOperand {
574   enum KindTy {
575     k_CondCode,
576     k_CCOut,
577     k_ITCondMask,
578     k_CoprocNum,
579     k_CoprocReg,
580     k_CoprocOption,
581     k_Immediate,
582     k_MemBarrierOpt,
583     k_InstSyncBarrierOpt,
584     k_Memory,
585     k_PostIndexRegister,
586     k_MSRMask,
587     k_BankedReg,
588     k_ProcIFlags,
589     k_VectorIndex,
590     k_Register,
591     k_RegisterList,
592     k_DPRRegisterList,
593     k_SPRRegisterList,
594     k_VectorList,
595     k_VectorListAllLanes,
596     k_VectorListIndexed,
597     k_ShiftedRegister,
598     k_ShiftedImmediate,
599     k_ShifterImmediate,
600     k_RotateImmediate,
601     k_ModifiedImmediate,
602     k_ConstantPoolImmediate,
603     k_BitfieldDescriptor,
604     k_Token,
605   } Kind;
606 
607   SMLoc StartLoc, EndLoc, AlignmentLoc;
608   SmallVector<unsigned, 8> Registers;
609 
610   struct CCOp {
611     ARMCC::CondCodes Val;
612   };
613 
614   struct CopOp {
615     unsigned Val;
616   };
617 
618   struct CoprocOptionOp {
619     unsigned Val;
620   };
621 
622   struct ITMaskOp {
623     unsigned Mask:4;
624   };
625 
626   struct MBOptOp {
627     ARM_MB::MemBOpt Val;
628   };
629 
630   struct ISBOptOp {
631     ARM_ISB::InstSyncBOpt Val;
632   };
633 
634   struct IFlagsOp {
635     ARM_PROC::IFlags Val;
636   };
637 
638   struct MMaskOp {
639     unsigned Val;
640   };
641 
642   struct BankedRegOp {
643     unsigned Val;
644   };
645 
646   struct TokOp {
647     const char *Data;
648     unsigned Length;
649   };
650 
651   struct RegOp {
652     unsigned RegNum;
653   };
654 
655   // A vector register list is a sequential list of 1 to 4 registers.
656   struct VectorListOp {
657     unsigned RegNum;
658     unsigned Count;
659     unsigned LaneIndex;
660     bool isDoubleSpaced;
661   };
662 
663   struct VectorIndexOp {
664     unsigned Val;
665   };
666 
667   struct ImmOp {
668     const MCExpr *Val;
669   };
670 
671   /// Combined record for all forms of ARM address expressions.
672   struct MemoryOp {
673     unsigned BaseRegNum;
674     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
675     // was specified.
676     const MCConstantExpr *OffsetImm;  // Offset immediate value
677     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
678     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
679     unsigned ShiftImm;        // shift for OffsetReg.
680     unsigned Alignment;       // 0 = no alignment specified
681     // n = alignment in bytes (2, 4, 8, 16, or 32)
682     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
683   };
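  // For example (illustrative): in the MemoryOp record above, "[r0, #4]" is
  // held with BaseRegNum == ARM::R0, an OffsetImm of 4 and OffsetRegNum == 0,
  // whereas "[r0, r1, lsl #2]" uses OffsetRegNum == ARM::R1 with
  // ShiftType == ARM_AM::lsl and ShiftImm == 2.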
684 
685   struct PostIdxRegOp {
686     unsigned RegNum;
687     bool isAdd;
688     ARM_AM::ShiftOpc ShiftTy;
689     unsigned ShiftImm;
690   };
691 
692   struct ShifterImmOp {
693     bool isASR;
694     unsigned Imm;
695   };
696 
697   struct RegShiftedRegOp {
698     ARM_AM::ShiftOpc ShiftTy;
699     unsigned SrcReg;
700     unsigned ShiftReg;
701     unsigned ShiftImm;
702   };
703 
704   struct RegShiftedImmOp {
705     ARM_AM::ShiftOpc ShiftTy;
706     unsigned SrcReg;
707     unsigned ShiftImm;
708   };
709 
710   struct RotImmOp {
711     unsigned Imm;
712   };
713 
714   struct ModImmOp {
715     unsigned Bits;
716     unsigned Rot;
717   };
718 
719   struct BitfieldOp {
720     unsigned LSB;
721     unsigned Width;
722   };
723 
724   union {
725     struct CCOp CC;
726     struct CopOp Cop;
727     struct CoprocOptionOp CoprocOption;
728     struct MBOptOp MBOpt;
729     struct ISBOptOp ISBOpt;
730     struct ITMaskOp ITMask;
731     struct IFlagsOp IFlags;
732     struct MMaskOp MMask;
733     struct BankedRegOp BankedReg;
734     struct TokOp Tok;
735     struct RegOp Reg;
736     struct VectorListOp VectorList;
737     struct VectorIndexOp VectorIndex;
738     struct ImmOp Imm;
739     struct MemoryOp Memory;
740     struct PostIdxRegOp PostIdxReg;
741     struct ShifterImmOp ShifterImm;
742     struct RegShiftedRegOp RegShiftedReg;
743     struct RegShiftedImmOp RegShiftedImm;
744     struct RotImmOp RotImm;
745     struct ModImmOp ModImm;
746     struct BitfieldOp Bitfield;
747   };
748 
749 public:
750   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
751 
752   /// getStartLoc - Get the location of the first token of this operand.
753   SMLoc getStartLoc() const override { return StartLoc; }
754   /// getEndLoc - Get the location of the last token of this operand.
755   SMLoc getEndLoc() const override { return EndLoc; }
756   /// getLocRange - Get the range between the first and last token of this
757   /// operand.
758   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
759 
760   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
761   SMLoc getAlignmentLoc() const {
762     assert(Kind == k_Memory && "Invalid access!");
763     return AlignmentLoc;
764   }
765 
766   ARMCC::CondCodes getCondCode() const {
767     assert(Kind == k_CondCode && "Invalid access!");
768     return CC.Val;
769   }
770 
771   unsigned getCoproc() const {
772     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
773     return Cop.Val;
774   }
775 
776   StringRef getToken() const {
777     assert(Kind == k_Token && "Invalid access!");
778     return StringRef(Tok.Data, Tok.Length);
779   }
780 
781   unsigned getReg() const override {
782     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
783     return Reg.RegNum;
784   }
785 
786   const SmallVectorImpl<unsigned> &getRegList() const {
787     assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
788             Kind == k_SPRRegisterList) && "Invalid access!");
789     return Registers;
790   }
791 
792   const MCExpr *getImm() const {
793     assert(isImm() && "Invalid access!");
794     return Imm.Val;
795   }
796 
797   const MCExpr *getConstantPoolImm() const {
798     assert(isConstantPoolImm() && "Invalid access!");
799     return Imm.Val;
800   }
801 
802   unsigned getVectorIndex() const {
803     assert(Kind == k_VectorIndex && "Invalid access!");
804     return VectorIndex.Val;
805   }
806 
807   ARM_MB::MemBOpt getMemBarrierOpt() const {
808     assert(Kind == k_MemBarrierOpt && "Invalid access!");
809     return MBOpt.Val;
810   }
811 
812   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
813     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
814     return ISBOpt.Val;
815   }
816 
817   ARM_PROC::IFlags getProcIFlags() const {
818     assert(Kind == k_ProcIFlags && "Invalid access!");
819     return IFlags.Val;
820   }
821 
822   unsigned getMSRMask() const {
823     assert(Kind == k_MSRMask && "Invalid access!");
824     return MMask.Val;
825   }
826 
827   unsigned getBankedReg() const {
828     assert(Kind == k_BankedReg && "Invalid access!");
829     return BankedReg.Val;
830   }
831 
832   bool isCoprocNum() const { return Kind == k_CoprocNum; }
833   bool isCoprocReg() const { return Kind == k_CoprocReg; }
834   bool isCoprocOption() const { return Kind == k_CoprocOption; }
835   bool isCondCode() const { return Kind == k_CondCode; }
836   bool isCCOut() const { return Kind == k_CCOut; }
837   bool isITMask() const { return Kind == k_ITCondMask; }
838   bool isITCondCode() const { return Kind == k_CondCode; }
839   bool isImm() const override {
840     return Kind == k_Immediate;
841   }
842 
843   bool isARMBranchTarget() const {
844     if (!isImm()) return false;
845 
846     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
847       return CE->getValue() % 4 == 0;
848     return true;
849   }
850 
852   bool isThumbBranchTarget() const {
853     if (!isImm()) return false;
854 
855     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
856       return CE->getValue() % 2 == 0;
857     return true;
858   }
859 
  // Checks whether this operand is an unsigned offset which fits in a field
  // of the specified width and is scaled by the specified number of bits.
862   template<unsigned width, unsigned scale>
863   bool isUnsignedOffset() const {
864     if (!isImm()) return false;
865     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
866     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
867       int64_t Val = CE->getValue();
868       int64_t Align = 1LL << scale;
869       int64_t Max = Align * ((1LL << width) - 1);
870       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
871     }
872     return false;
873   }
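  // For example (sketch): isUnsignedOffset<8, 2>() accepts multiples of 4 in
  // the range [0, 1020].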
  // Checks whether this operand is a signed offset which fits in a field
  // of the specified width and is scaled by the specified number of bits.
876   template<unsigned width, unsigned scale>
877   bool isSignedOffset() const {
878     if (!isImm()) return false;
879     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
880     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
881       int64_t Val = CE->getValue();
882       int64_t Align = 1LL << scale;
883       int64_t Max = Align * ((1LL << (width-1)) - 1);
884       int64_t Min = -Align * (1LL << (width-1));
885       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
886     }
887     return false;
888   }
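  // For example (sketch): isSignedOffset<8, 2>() accepts multiples of 4 in
  // the range [-512, 508].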
889 
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
  // relocatable expression...
894   bool isThumbMemPC() const {
895     int64_t Val = 0;
896     if (isImm()) {
897       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
898       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
899       if (!CE) return false;
900       Val = CE->getValue();
901     }
902     else if (isMem()) {
      if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if (Memory.BaseRegNum != ARM::PC) return false;
905       Val = Memory.OffsetImm->getValue();
906     }
907     else return false;
908     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
909   }
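  // For example (sketch): isThumbMemPC() accepts "[pc, #16]" as well as a
  // bare "#16", but rejects "#1022" (not a multiple of 4) and "#1024" (out of
  // range).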
910   bool isFPImm() const {
911     if (!isImm()) return false;
912     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
913     if (!CE) return false;
914     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
915     return Val != -1;
916   }
917   bool isFBits16() const {
918     if (!isImm()) return false;
919     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
920     if (!CE) return false;
921     int64_t Value = CE->getValue();
922     return Value >= 0 && Value <= 16;
923   }
924   bool isFBits32() const {
925     if (!isImm()) return false;
926     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
927     if (!CE) return false;
928     int64_t Value = CE->getValue();
929     return Value >= 1 && Value <= 32;
930   }
931   bool isImm8s4() const {
932     if (!isImm()) return false;
933     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
934     if (!CE) return false;
935     int64_t Value = CE->getValue();
936     return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
937   }
938   bool isImm0_1020s4() const {
939     if (!isImm()) return false;
940     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
941     if (!CE) return false;
942     int64_t Value = CE->getValue();
943     return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
944   }
945   bool isImm0_508s4() const {
946     if (!isImm()) return false;
947     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
948     if (!CE) return false;
949     int64_t Value = CE->getValue();
950     return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
951   }
952   bool isImm0_508s4Neg() const {
953     if (!isImm()) return false;
954     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
955     if (!CE) return false;
956     int64_t Value = -CE->getValue();
957     // explicitly exclude zero. we want that to use the normal 0_508 version.
958     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
959   }
960   bool isImm0_239() const {
961     if (!isImm()) return false;
962     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
963     if (!CE) return false;
964     int64_t Value = CE->getValue();
965     return Value >= 0 && Value < 240;
966   }
967   bool isImm0_255() const {
968     if (!isImm()) return false;
969     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
970     if (!CE) return false;
971     int64_t Value = CE->getValue();
972     return Value >= 0 && Value < 256;
973   }
974   bool isImm0_4095() const {
975     if (!isImm()) return false;
976     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
977     if (!CE) return false;
978     int64_t Value = CE->getValue();
979     return Value >= 0 && Value < 4096;
980   }
981   bool isImm0_4095Neg() const {
982     if (!isImm()) return false;
983     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
984     if (!CE) return false;
985     int64_t Value = -CE->getValue();
986     return Value > 0 && Value < 4096;
987   }
988   bool isImm0_1() const {
989     if (!isImm()) return false;
990     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
991     if (!CE) return false;
992     int64_t Value = CE->getValue();
993     return Value >= 0 && Value < 2;
994   }
995   bool isImm0_3() const {
996     if (!isImm()) return false;
997     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
998     if (!CE) return false;
999     int64_t Value = CE->getValue();
1000     return Value >= 0 && Value < 4;
1001   }
1002   bool isImm0_7() const {
1003     if (!isImm()) return false;
1004     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1005     if (!CE) return false;
1006     int64_t Value = CE->getValue();
1007     return Value >= 0 && Value < 8;
1008   }
1009   bool isImm0_15() const {
1010     if (!isImm()) return false;
1011     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1012     if (!CE) return false;
1013     int64_t Value = CE->getValue();
1014     return Value >= 0 && Value < 16;
1015   }
1016   bool isImm0_31() const {
1017     if (!isImm()) return false;
1018     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1019     if (!CE) return false;
1020     int64_t Value = CE->getValue();
1021     return Value >= 0 && Value < 32;
1022   }
1023   bool isImm0_63() const {
1024     if (!isImm()) return false;
1025     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1026     if (!CE) return false;
1027     int64_t Value = CE->getValue();
1028     return Value >= 0 && Value < 64;
1029   }
1030   bool isImm8() const {
1031     if (!isImm()) return false;
1032     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1033     if (!CE) return false;
1034     int64_t Value = CE->getValue();
1035     return Value == 8;
1036   }
1037   bool isImm16() const {
1038     if (!isImm()) return false;
1039     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1040     if (!CE) return false;
1041     int64_t Value = CE->getValue();
1042     return Value == 16;
1043   }
1044   bool isImm32() const {
1045     if (!isImm()) return false;
1046     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1047     if (!CE) return false;
1048     int64_t Value = CE->getValue();
1049     return Value == 32;
1050   }
1051   bool isShrImm8() const {
1052     if (!isImm()) return false;
1053     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1054     if (!CE) return false;
1055     int64_t Value = CE->getValue();
1056     return Value > 0 && Value <= 8;
1057   }
1058   bool isShrImm16() const {
1059     if (!isImm()) return false;
1060     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1061     if (!CE) return false;
1062     int64_t Value = CE->getValue();
1063     return Value > 0 && Value <= 16;
1064   }
1065   bool isShrImm32() const {
1066     if (!isImm()) return false;
1067     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1068     if (!CE) return false;
1069     int64_t Value = CE->getValue();
1070     return Value > 0 && Value <= 32;
1071   }
1072   bool isShrImm64() const {
1073     if (!isImm()) return false;
1074     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1075     if (!CE) return false;
1076     int64_t Value = CE->getValue();
1077     return Value > 0 && Value <= 64;
1078   }
1079   bool isImm1_7() const {
1080     if (!isImm()) return false;
1081     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1082     if (!CE) return false;
1083     int64_t Value = CE->getValue();
1084     return Value > 0 && Value < 8;
1085   }
1086   bool isImm1_15() const {
1087     if (!isImm()) return false;
1088     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1089     if (!CE) return false;
1090     int64_t Value = CE->getValue();
1091     return Value > 0 && Value < 16;
1092   }
1093   bool isImm1_31() const {
1094     if (!isImm()) return false;
1095     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1096     if (!CE) return false;
1097     int64_t Value = CE->getValue();
1098     return Value > 0 && Value < 32;
1099   }
1100   bool isImm1_16() const {
1101     if (!isImm()) return false;
1102     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1103     if (!CE) return false;
1104     int64_t Value = CE->getValue();
1105     return Value > 0 && Value < 17;
1106   }
1107   bool isImm1_32() const {
1108     if (!isImm()) return false;
1109     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1110     if (!CE) return false;
1111     int64_t Value = CE->getValue();
1112     return Value > 0 && Value < 33;
1113   }
1114   bool isImm0_32() const {
1115     if (!isImm()) return false;
1116     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1117     if (!CE) return false;
1118     int64_t Value = CE->getValue();
1119     return Value >= 0 && Value < 33;
1120   }
1121   bool isImm0_65535() const {
1122     if (!isImm()) return false;
1123     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1124     if (!CE) return false;
1125     int64_t Value = CE->getValue();
1126     return Value >= 0 && Value < 65536;
1127   }
1128   bool isImm256_65535Expr() const {
1129     if (!isImm()) return false;
1130     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1131     // If it's not a constant expression, it'll generate a fixup and be
1132     // handled later.
1133     if (!CE) return true;
1134     int64_t Value = CE->getValue();
1135     return Value >= 256 && Value < 65536;
1136   }
1137   bool isImm0_65535Expr() const {
1138     if (!isImm()) return false;
1139     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1140     // If it's not a constant expression, it'll generate a fixup and be
1141     // handled later.
1142     if (!CE) return true;
1143     int64_t Value = CE->getValue();
1144     return Value >= 0 && Value < 65536;
1145   }
1146   bool isImm24bit() const {
1147     if (!isImm()) return false;
1148     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1149     if (!CE) return false;
1150     int64_t Value = CE->getValue();
1151     return Value >= 0 && Value <= 0xffffff;
1152   }
1153   bool isImmThumbSR() const {
1154     if (!isImm()) return false;
1155     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1156     if (!CE) return false;
1157     int64_t Value = CE->getValue();
1158     return Value > 0 && Value < 33;
1159   }
1160   bool isPKHLSLImm() const {
1161     if (!isImm()) return false;
1162     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163     if (!CE) return false;
1164     int64_t Value = CE->getValue();
1165     return Value >= 0 && Value < 32;
1166   }
1167   bool isPKHASRImm() const {
1168     if (!isImm()) return false;
1169     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1170     if (!CE) return false;
1171     int64_t Value = CE->getValue();
1172     return Value > 0 && Value <= 32;
1173   }
1174   bool isAdrLabel() const {
1175     // If we have an immediate that's not a constant, treat it as a label
1176     // reference needing a fixup.
1177     if (isImm() && !isa<MCConstantExpr>(getImm()))
1178       return true;
1179 
1180     // If it is a constant, it must fit into a modified immediate encoding.
1181     if (!isImm()) return false;
1182     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1183     if (!CE) return false;
1184     int64_t Value = CE->getValue();
1185     return (ARM_AM::getSOImmVal(Value) != -1 ||
1186             ARM_AM::getSOImmVal(-Value) != -1);
1187   }
1188   bool isT2SOImm() const {
1189     if (!isImm()) return false;
1190     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1191     if (!CE) return false;
1192     int64_t Value = CE->getValue();
1193     return ARM_AM::getT2SOImmVal(Value) != -1;
1194   }
1195   bool isT2SOImmNot() const {
1196     if (!isImm()) return false;
1197     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1198     if (!CE) return false;
1199     int64_t Value = CE->getValue();
1200     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1201       ARM_AM::getT2SOImmVal(~Value) != -1;
1202   }
1203   bool isT2SOImmNeg() const {
1204     if (!isImm()) return false;
1205     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1206     if (!CE) return false;
1207     int64_t Value = CE->getValue();
1208     // Only use this when not representable as a plain so_imm.
1209     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1210       ARM_AM::getT2SOImmVal(-Value) != -1;
1211   }
1212   bool isSetEndImm() const {
1213     if (!isImm()) return false;
1214     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1215     if (!CE) return false;
1216     int64_t Value = CE->getValue();
1217     return Value == 1 || Value == 0;
1218   }
1219   bool isReg() const override { return Kind == k_Register; }
1220   bool isRegList() const { return Kind == k_RegisterList; }
1221   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1222   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1223   bool isToken() const override { return Kind == k_Token; }
1224   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1225   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1226   bool isMem() const override { return Kind == k_Memory; }
1227   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1228   bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
1229   bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
1230   bool isRotImm() const { return Kind == k_RotateImmediate; }
1231   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1232   bool isModImmNot() const {
1233     if (!isImm()) return false;
1234     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1235     if (!CE) return false;
1236     int64_t Value = CE->getValue();
1237     return ARM_AM::getSOImmVal(~Value) != -1;
1238   }
1239   bool isModImmNeg() const {
1240     if (!isImm()) return false;
1241     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1242     if (!CE) return false;
1243     int64_t Value = CE->getValue();
1244     return ARM_AM::getSOImmVal(Value) == -1 &&
1245       ARM_AM::getSOImmVal(-Value) != -1;
1246   }
1247   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1248   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1249   bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
1250   bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister &&
           PostIdxReg.ShiftTy == ARM_AM::no_shift;
1252   }
1253   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1254     if (!isMem())
1255       return false;
1256     // No offset of any kind.
1257     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1258      (alignOK || Memory.Alignment == Alignment);
1259   }
1260   bool isMemPCRelImm12() const {
1261     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1262       return false;
1263     // Base register must be PC.
1264     if (Memory.BaseRegNum != ARM::PC)
1265       return false;
1266     // Immediate offset in range [-4095, 4095].
1267     if (!Memory.OffsetImm) return true;
1268     int64_t Val = Memory.OffsetImm->getValue();
1269     return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1270   }
1271   bool isAlignedMemory() const {
1272     return isMemNoOffset(true);
1273   }
1274   bool isAlignedMemoryNone() const {
1275     return isMemNoOffset(false, 0);
1276   }
1277   bool isDupAlignedMemoryNone() const {
1278     return isMemNoOffset(false, 0);
1279   }
1280   bool isAlignedMemory16() const {
1281     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1282       return true;
1283     return isMemNoOffset(false, 0);
1284   }
1285   bool isDupAlignedMemory16() const {
1286     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1287       return true;
1288     return isMemNoOffset(false, 0);
1289   }
1290   bool isAlignedMemory32() const {
1291     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1292       return true;
1293     return isMemNoOffset(false, 0);
1294   }
1295   bool isDupAlignedMemory32() const {
1296     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1297       return true;
1298     return isMemNoOffset(false, 0);
1299   }
1300   bool isAlignedMemory64() const {
1301     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1302       return true;
1303     return isMemNoOffset(false, 0);
1304   }
1305   bool isDupAlignedMemory64() const {
1306     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1307       return true;
1308     return isMemNoOffset(false, 0);
1309   }
1310   bool isAlignedMemory64or128() const {
1311     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1312       return true;
1313     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1314       return true;
1315     return isMemNoOffset(false, 0);
1316   }
1317   bool isDupAlignedMemory64or128() const {
1318     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1319       return true;
1320     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1321       return true;
1322     return isMemNoOffset(false, 0);
1323   }
1324   bool isAlignedMemory64or128or256() const {
1325     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1326       return true;
1327     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1328       return true;
1329     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1330       return true;
1331     return isMemNoOffset(false, 0);
1332   }
1333   bool isAddrMode2() const {
1334     if (!isMem() || Memory.Alignment != 0) return false;
1335     // Check for register offset.
1336     if (Memory.OffsetRegNum) return true;
1337     // Immediate offset in range [-4095, 4095].
1338     if (!Memory.OffsetImm) return true;
1339     int64_t Val = Memory.OffsetImm->getValue();
1340     return Val > -4096 && Val < 4096;
1341   }
1342   bool isAM2OffsetImm() const {
1343     if (!isImm()) return false;
1344     // Immediate offset in range [-4095, 4095].
1345     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1346     if (!CE) return false;
1347     int64_t Val = CE->getValue();
1348     return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
1349   }
1350   bool isAddrMode3() const {
1351     // If we have an immediate that's not a constant, treat it as a label
1352     // reference needing a fixup. If it is a constant, it's something else
1353     // and we reject it.
1354     if (isImm() && !isa<MCConstantExpr>(getImm()))
1355       return true;
1356     if (!isMem() || Memory.Alignment != 0) return false;
1357     // No shifts are legal for AM3.
1358     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1359     // Check for register offset.
1360     if (Memory.OffsetRegNum) return true;
1361     // Immediate offset in range [-255, 255].
1362     if (!Memory.OffsetImm) return true;
1363     int64_t Val = Memory.OffsetImm->getValue();
1364     // The #-0 offset is encoded as INT32_MIN, and we have to check
1365     // for this too.
1366     return (Val > -256 && Val < 256) || Val == INT32_MIN;
1367   }
1368   bool isAM3Offset() const {
1369     if (Kind != k_Immediate && Kind != k_PostIndexRegister)
1370       return false;
1371     if (Kind == k_PostIndexRegister)
1372       return PostIdxReg.ShiftTy == ARM_AM::no_shift;
1373     // Immediate offset in range [-255, 255].
1374     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1375     if (!CE) return false;
1376     int64_t Val = CE->getValue();
1377     // Special case, #-0 is INT32_MIN.
1378     return (Val > -256 && Val < 256) || Val == INT32_MIN;
1379   }
1380   bool isAddrMode5() const {
1381     // If we have an immediate that's not a constant, treat it as a label
1382     // reference needing a fixup. If it is a constant, it's something else
1383     // and we reject it.
1384     if (isImm() && !isa<MCConstantExpr>(getImm()))
1385       return true;
1386     if (!isMem() || Memory.Alignment != 0) return false;
1387     // Check for register offset.
1388     if (Memory.OffsetRegNum) return false;
1389     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1390     if (!Memory.OffsetImm) return true;
1391     int64_t Val = Memory.OffsetImm->getValue();
1392     return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1393       Val == INT32_MIN;
1394   }
1395   bool isAddrMode5FP16() const {
1396     // If we have an immediate that's not a constant, treat it as a label
1397     // reference needing a fixup. If it is a constant, it's something else
1398     // and we reject it.
1399     if (isImm() && !isa<MCConstantExpr>(getImm()))
1400       return true;
1401     if (!isMem() || Memory.Alignment != 0) return false;
1402     // Check for register offset.
1403     if (Memory.OffsetRegNum) return false;
1404     // Immediate offset in range [-510, 510] and a multiple of 2.
1405     if (!Memory.OffsetImm) return true;
1406     int64_t Val = Memory.OffsetImm->getValue();
1407     return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) || Val == INT32_MIN;
1408   }
1409   bool isMemTBB() const {
1410     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1411         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1412       return false;
1413     return true;
1414   }
1415   bool isMemTBH() const {
1416     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1417         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0)
1419       return false;
1420     return true;
1421   }
1422   bool isMemRegOffset() const {
1423     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1424       return false;
1425     return true;
1426   }
1427   bool isT2MemRegOffset() const {
1428     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1429         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1430       return false;
1431     // Only lsl #{0, 1, 2, 3} allowed.
1432     if (Memory.ShiftType == ARM_AM::no_shift)
1433       return true;
1434     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1435       return false;
1436     return true;
1437   }
1438   bool isMemThumbRR() const {
1439     // Thumb reg+reg addressing is simple. Just two registers, a base and
1440     // an offset. No shifts, negations or any other complicating factors.
1441     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1442         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1443       return false;
1444     return isARMLowRegister(Memory.BaseRegNum) &&
1445       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1446   }
1447   bool isMemThumbRIs4() const {
1448     if (!isMem() || Memory.OffsetRegNum != 0 ||
1449         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1450       return false;
1451     // Immediate offset, multiple of 4 in range [0, 124].
1452     if (!Memory.OffsetImm) return true;
1453     int64_t Val = Memory.OffsetImm->getValue();
1454     return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1455   }
1456   bool isMemThumbRIs2() const {
1457     if (!isMem() || Memory.OffsetRegNum != 0 ||
1458         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1459       return false;
    // Immediate offset, multiple of 2 in range [0, 62].
1461     if (!Memory.OffsetImm) return true;
1462     int64_t Val = Memory.OffsetImm->getValue();
1463     return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1464   }
1465   bool isMemThumbRIs1() const {
1466     if (!isMem() || Memory.OffsetRegNum != 0 ||
1467         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1468       return false;
1469     // Immediate offset in range [0, 31].
1470     if (!Memory.OffsetImm) return true;
1471     int64_t Val = Memory.OffsetImm->getValue();
1472     return Val >= 0 && Val <= 31;
1473   }
1474   bool isMemThumbSPI() const {
1475     if (!isMem() || Memory.OffsetRegNum != 0 ||
1476         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1477       return false;
1478     // Immediate offset, multiple of 4 in range [0, 1020].
1479     if (!Memory.OffsetImm) return true;
1480     int64_t Val = Memory.OffsetImm->getValue();
1481     return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1482   }
1483   bool isMemImm8s4Offset() const {
1484     // If we have an immediate that's not a constant, treat it as a label
1485     // reference needing a fixup. If it is a constant, it's something else
1486     // and we reject it.
1487     if (isImm() && !isa<MCConstantExpr>(getImm()))
1488       return true;
1489     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1490       return false;
1491     // Immediate offset a multiple of 4 in range [-1020, 1020].
1492     if (!Memory.OffsetImm) return true;
1493     int64_t Val = Memory.OffsetImm->getValue();
1494     // Special case, #-0 is INT32_MIN.
1495     return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1496   }
1497   bool isMemImm0_1020s4Offset() const {
1498     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1499       return false;
1500     // Immediate offset a multiple of 4 in range [0, 1020].
1501     if (!Memory.OffsetImm) return true;
1502     int64_t Val = Memory.OffsetImm->getValue();
1503     return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1504   }
1505   bool isMemImm8Offset() const {
1506     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1507       return false;
1508     // Base reg of PC isn't allowed for these encodings.
1509     if (Memory.BaseRegNum == ARM::PC) return false;
1510     // Immediate offset in range [-255, 255].
1511     if (!Memory.OffsetImm) return true;
1512     int64_t Val = Memory.OffsetImm->getValue();
1513     return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1514   }
1515   bool isMemPosImm8Offset() const {
1516     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1517       return false;
1518     // Immediate offset in range [0, 255].
1519     if (!Memory.OffsetImm) return true;
1520     int64_t Val = Memory.OffsetImm->getValue();
1521     return Val >= 0 && Val < 256;
1522   }
1523   bool isMemNegImm8Offset() const {
1524     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1525       return false;
1526     // Base reg of PC isn't allowed for these encodings.
1527     if (Memory.BaseRegNum == ARM::PC) return false;
1528     // Immediate offset in range [-255, -1].
1529     if (!Memory.OffsetImm) return false;
1530     int64_t Val = Memory.OffsetImm->getValue();
1531     return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1532   }
1533   bool isMemUImm12Offset() const {
1534     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1535       return false;
1536     // Immediate offset in range [0, 4095].
1537     if (!Memory.OffsetImm) return true;
1538     int64_t Val = Memory.OffsetImm->getValue();
1539     return (Val >= 0 && Val < 4096);
1540   }
1541   bool isMemImm12Offset() const {
1542     // If we have an immediate that's not a constant, treat it as a label
1543     // reference needing a fixup. If it is a constant, it's something else
1544     // and we reject it.
1545 
1546     if (isImm() && !isa<MCConstantExpr>(getImm()))
1547       return true;
1548 
1549     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1550       return false;
1551     // Immediate offset in range [-4095, 4095].
1552     if (!Memory.OffsetImm) return true;
1553     int64_t Val = Memory.OffsetImm->getValue();
1554     return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1555   }
1556   bool isConstPoolAsmImm() const {
    // Delay processing of the Constant Pool Immediate; this will turn into
    // a constant later. Match no other operand.
1559     return (isConstantPoolImm());
1560   }
1561   bool isPostIdxImm8() const {
1562     if (!isImm()) return false;
1563     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1564     if (!CE) return false;
1565     int64_t Val = CE->getValue();
1566     return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1567   }
1568   bool isPostIdxImm8s4() const {
1569     if (!isImm()) return false;
1570     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1571     if (!CE) return false;
1572     int64_t Val = CE->getValue();
1573     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1574       (Val == INT32_MIN);
1575   }
1576 
1577   bool isMSRMask() const { return Kind == k_MSRMask; }
1578   bool isBankedReg() const { return Kind == k_BankedReg; }
1579   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1580 
1581   // NEON operands.
1582   bool isSingleSpacedVectorList() const {
1583     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1584   }
1585   bool isDoubleSpacedVectorList() const {
1586     return Kind == k_VectorList && VectorList.isDoubleSpaced;
1587   }
1588   bool isVecListOneD() const {
1589     if (!isSingleSpacedVectorList()) return false;
1590     return VectorList.Count == 1;
1591   }
1592 
1593   bool isVecListDPair() const {
1594     if (!isSingleSpacedVectorList()) return false;
1595     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1596               .contains(VectorList.RegNum));
1597   }
1598 
1599   bool isVecListThreeD() const {
1600     if (!isSingleSpacedVectorList()) return false;
1601     return VectorList.Count == 3;
1602   }
1603 
1604   bool isVecListFourD() const {
1605     if (!isSingleSpacedVectorList()) return false;
1606     return VectorList.Count == 4;
1607   }
1608 
1609   bool isVecListDPairSpaced() const {
1610     if (Kind != k_VectorList) return false;
1611     if (isSingleSpacedVectorList()) return false;
1612     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1613               .contains(VectorList.RegNum));
1614   }
1615 
1616   bool isVecListThreeQ() const {
1617     if (!isDoubleSpacedVectorList()) return false;
1618     return VectorList.Count == 3;
1619   }
1620 
1621   bool isVecListFourQ() const {
1622     if (!isDoubleSpacedVectorList()) return false;
1623     return VectorList.Count == 4;
1624   }
1625 
1626   bool isSingleSpacedVectorAllLanes() const {
1627     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1628   }
1629   bool isDoubleSpacedVectorAllLanes() const {
1630     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1631   }
1632   bool isVecListOneDAllLanes() const {
1633     if (!isSingleSpacedVectorAllLanes()) return false;
1634     return VectorList.Count == 1;
1635   }
1636 
1637   bool isVecListDPairAllLanes() const {
1638     if (!isSingleSpacedVectorAllLanes()) return false;
1639     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1640               .contains(VectorList.RegNum));
1641   }
1642 
1643   bool isVecListDPairSpacedAllLanes() const {
1644     if (!isDoubleSpacedVectorAllLanes()) return false;
1645     return VectorList.Count == 2;
1646   }
1647 
1648   bool isVecListThreeDAllLanes() const {
1649     if (!isSingleSpacedVectorAllLanes()) return false;
1650     return VectorList.Count == 3;
1651   }
1652 
1653   bool isVecListThreeQAllLanes() const {
1654     if (!isDoubleSpacedVectorAllLanes()) return false;
1655     return VectorList.Count == 3;
1656   }
1657 
1658   bool isVecListFourDAllLanes() const {
1659     if (!isSingleSpacedVectorAllLanes()) return false;
1660     return VectorList.Count == 4;
1661   }
1662 
1663   bool isVecListFourQAllLanes() const {
1664     if (!isDoubleSpacedVectorAllLanes()) return false;
1665     return VectorList.Count == 4;
1666   }
1667 
1668   bool isSingleSpacedVectorIndexed() const {
1669     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1670   }
1671   bool isDoubleSpacedVectorIndexed() const {
1672     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1673   }
1674   bool isVecListOneDByteIndexed() const {
1675     if (!isSingleSpacedVectorIndexed()) return false;
1676     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1677   }
1678 
1679   bool isVecListOneDHWordIndexed() const {
1680     if (!isSingleSpacedVectorIndexed()) return false;
1681     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1682   }
1683 
1684   bool isVecListOneDWordIndexed() const {
1685     if (!isSingleSpacedVectorIndexed()) return false;
1686     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1687   }
1688 
1689   bool isVecListTwoDByteIndexed() const {
1690     if (!isSingleSpacedVectorIndexed()) return false;
1691     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1692   }
1693 
1694   bool isVecListTwoDHWordIndexed() const {
1695     if (!isSingleSpacedVectorIndexed()) return false;
1696     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1697   }
1698 
1699   bool isVecListTwoQWordIndexed() const {
1700     if (!isDoubleSpacedVectorIndexed()) return false;
1701     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1702   }
1703 
1704   bool isVecListTwoQHWordIndexed() const {
1705     if (!isDoubleSpacedVectorIndexed()) return false;
1706     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1707   }
1708 
1709   bool isVecListTwoDWordIndexed() const {
1710     if (!isSingleSpacedVectorIndexed()) return false;
1711     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1712   }
1713 
1714   bool isVecListThreeDByteIndexed() const {
1715     if (!isSingleSpacedVectorIndexed()) return false;
1716     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1717   }
1718 
1719   bool isVecListThreeDHWordIndexed() const {
1720     if (!isSingleSpacedVectorIndexed()) return false;
1721     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1722   }
1723 
1724   bool isVecListThreeQWordIndexed() const {
1725     if (!isDoubleSpacedVectorIndexed()) return false;
1726     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1727   }
1728 
1729   bool isVecListThreeQHWordIndexed() const {
1730     if (!isDoubleSpacedVectorIndexed()) return false;
1731     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1732   }
1733 
1734   bool isVecListThreeDWordIndexed() const {
1735     if (!isSingleSpacedVectorIndexed()) return false;
1736     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1737   }
1738 
1739   bool isVecListFourDByteIndexed() const {
1740     if (!isSingleSpacedVectorIndexed()) return false;
1741     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1742   }
1743 
1744   bool isVecListFourDHWordIndexed() const {
1745     if (!isSingleSpacedVectorIndexed()) return false;
1746     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1747   }
1748 
1749   bool isVecListFourQWordIndexed() const {
1750     if (!isDoubleSpacedVectorIndexed()) return false;
1751     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1752   }
1753 
1754   bool isVecListFourQHWordIndexed() const {
1755     if (!isDoubleSpacedVectorIndexed()) return false;
1756     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1757   }
1758 
1759   bool isVecListFourDWordIndexed() const {
1760     if (!isSingleSpacedVectorIndexed()) return false;
1761     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1762   }
1763 
1764   bool isVectorIndex8() const {
1765     if (Kind != k_VectorIndex) return false;
1766     return VectorIndex.Val < 8;
1767   }
1768   bool isVectorIndex16() const {
1769     if (Kind != k_VectorIndex) return false;
1770     return VectorIndex.Val < 4;
1771   }
1772   bool isVectorIndex32() const {
1773     if (Kind != k_VectorIndex) return false;
1774     return VectorIndex.Val < 2;
1775   }
1776 
1777   bool isNEONi8splat() const {
1778     if (!isImm()) return false;
1779     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1780     // Must be a constant.
1781     if (!CE) return false;
1782     int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
1785     return Value >= 0 && Value < 256;
1786   }
1787 
1788   bool isNEONi16splat() const {
1789     if (isNEONByteReplicate(2))
      return false; // Leave that to the byte-replicate case and reject it here.
1791     if (!isImm())
1792       return false;
1793     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1794     // Must be a constant.
1795     if (!CE) return false;
1796     unsigned Value = CE->getValue();
1797     return ARM_AM::isNEONi16splat(Value);
1798   }
1799 
1800   bool isNEONi16splatNot() const {
1801     if (!isImm())
1802       return false;
1803     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1804     // Must be a constant.
1805     if (!CE) return false;
1806     unsigned Value = CE->getValue();
1807     return ARM_AM::isNEONi16splat(~Value & 0xffff);
1808   }
1809 
1810   bool isNEONi32splat() const {
1811     if (isNEONByteReplicate(4))
      return false; // Leave that to the byte-replicate case and reject it here.
1813     if (!isImm())
1814       return false;
1815     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1816     // Must be a constant.
1817     if (!CE) return false;
1818     unsigned Value = CE->getValue();
1819     return ARM_AM::isNEONi32splat(Value);
1820   }
1821 
1822   bool isNEONi32splatNot() const {
1823     if (!isImm())
1824       return false;
1825     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1826     // Must be a constant.
1827     if (!CE) return false;
1828     unsigned Value = CE->getValue();
1829     return ARM_AM::isNEONi32splat(~Value);
1830   }
1831 
1832   bool isNEONByteReplicate(unsigned NumBytes) const {
1833     if (!isImm())
1834       return false;
1835     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1836     // Must be a constant.
1837     if (!CE)
1838       return false;
1839     int64_t Value = CE->getValue();
1840     if (!Value)
1841       return false; // Don't bother with zero.
1842 
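    // Check that every byte of the constant equals the low byte; e.g. with
    // NumBytes == 4, 0x2a2a2a2a qualifies while 0x2a2a2a00 does not.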
1843     unsigned char B = Value & 0xff;
1844     for (unsigned i = 1; i < NumBytes; ++i) {
1845       Value >>= 8;
1846       if ((Value & 0xff) != B)
1847         return false;
1848     }
1849     return true;
1850   }
1851   bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
1852   bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
1853   bool isNEONi32vmov() const {
1854     if (isNEONByteReplicate(4))
      return false; // Let it be classified as a byte-replicate case.
1856     if (!isImm())
1857       return false;
1858     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1859     // Must be a constant.
1860     if (!CE)
1861       return false;
1862     int64_t Value = CE->getValue();
1863     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1864     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    // FIXME: This is probably wrong, a copy-and-paste from the previous case.
1866     return (Value >= 0 && Value < 256) ||
1867       (Value >= 0x0100 && Value <= 0xff00) ||
1868       (Value >= 0x010000 && Value <= 0xff0000) ||
1869       (Value >= 0x01000000 && Value <= 0xff000000) ||
1870       (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1871       (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1872   }
1873   bool isNEONi32vmovNeg() const {
1874     if (!isImm()) return false;
1875     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1876     // Must be a constant.
1877     if (!CE) return false;
1878     int64_t Value = ~CE->getValue();
1879     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1880     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    // FIXME: This is probably wrong, a copy-and-paste from the previous case.
1882     return (Value >= 0 && Value < 256) ||
1883       (Value >= 0x0100 && Value <= 0xff00) ||
1884       (Value >= 0x010000 && Value <= 0xff0000) ||
1885       (Value >= 0x01000000 && Value <= 0xff000000) ||
1886       (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1887       (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1888   }
1889 
1890   bool isNEONi64splat() const {
1891     if (!isImm()) return false;
1892     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1893     // Must be a constant.
1894     if (!CE) return false;
1895     uint64_t Value = CE->getValue();
1896     // i64 value with each byte being either 0 or 0xff.
1897     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1898       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1899     return true;
1900   }
1901 
1902   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1903     // Add as immediates when possible.  Null MCExpr = 0.
1904     if (!Expr)
1905       Inst.addOperand(MCOperand::createImm(0));
1906     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1907       Inst.addOperand(MCOperand::createImm(CE->getValue()));
1908     else
1909       Inst.addOperand(MCOperand::createExpr(Expr));
1910   }
1911 
1912   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
1913     assert(N == 1 && "Invalid number of operands!");
1914     addExpr(Inst, getImm());
1915   }
1916 
1917   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
1918     assert(N == 1 && "Invalid number of operands!");
1919     addExpr(Inst, getImm());
1920   }
1921 
1922   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1923     assert(N == 2 && "Invalid number of operands!");
1924     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1925     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1926     Inst.addOperand(MCOperand::createReg(RegNum));
1927   }
1928 
1929   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1930     assert(N == 1 && "Invalid number of operands!");
1931     Inst.addOperand(MCOperand::createImm(getCoproc()));
1932   }
1933 
1934   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1935     assert(N == 1 && "Invalid number of operands!");
1936     Inst.addOperand(MCOperand::createImm(getCoproc()));
1937   }
1938 
1939   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1940     assert(N == 1 && "Invalid number of operands!");
1941     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
1942   }
1943 
1944   void addITMaskOperands(MCInst &Inst, unsigned N) const {
1945     assert(N == 1 && "Invalid number of operands!");
1946     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
1947   }
1948 
1949   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1950     assert(N == 1 && "Invalid number of operands!");
1951     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1952   }
1953 
1954   void addCCOutOperands(MCInst &Inst, unsigned N) const {
1955     assert(N == 1 && "Invalid number of operands!");
1956     Inst.addOperand(MCOperand::createReg(getReg()));
1957   }
1958 
1959   void addRegOperands(MCInst &Inst, unsigned N) const {
1960     assert(N == 1 && "Invalid number of operands!");
1961     Inst.addOperand(MCOperand::createReg(getReg()));
1962   }
1963 
1964   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1965     assert(N == 3 && "Invalid number of operands!");
1966     assert(isRegShiftedReg() &&
1967            "addRegShiftedRegOperands() on non-RegShiftedReg!");
1968     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
1969     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
1970     Inst.addOperand(MCOperand::createImm(
1971       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1972   }
1973 
1974   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1975     assert(N == 2 && "Invalid number of operands!");
1976     assert(isRegShiftedImm() &&
1977            "addRegShiftedImmOperands() on non-RegShiftedImm!");
1978     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
1979     // Shift of #32 is encoded as 0 where permitted
1980     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1981     Inst.addOperand(MCOperand::createImm(
1982       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1983   }
1984 
1985   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1986     assert(N == 1 && "Invalid number of operands!");
1987     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
1988                                          ShifterImm.Imm));
1989   }
1990 
1991   void addRegListOperands(MCInst &Inst, unsigned N) const {
1992     assert(N == 1 && "Invalid number of operands!");
1993     const SmallVectorImpl<unsigned> &RegList = getRegList();
1994     for (SmallVectorImpl<unsigned>::const_iterator
1995            I = RegList.begin(), E = RegList.end(); I != E; ++I)
1996       Inst.addOperand(MCOperand::createReg(*I));
1997   }
1998 
1999   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2000     addRegListOperands(Inst, N);
2001   }
2002 
2003   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2004     addRegListOperands(Inst, N);
2005   }
2006 
2007   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2008     assert(N == 1 && "Invalid number of operands!");
2009     // Encoded as val>>3. The printer handles display as 8, 16, 24.
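    // e.g. a rotation of 16 is emitted here as 16 >> 3 == 2.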
2010     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2011   }
2012 
2013   void addModImmOperands(MCInst &Inst, unsigned N) const {
2014     assert(N == 1 && "Invalid number of operands!");
2015 
2016     // Support for fixups (MCFixup)
2017     if (isImm())
2018       return addImmOperands(Inst, N);
2019 
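    // ModImm.Rot holds the rotation amount itself (an even value), so the
    // shift by 7 places Rot/2 into the 4-bit rotate field above the eight
    // immediate bits; e.g. Bits == 1, Rot == 24 encodes as 1 | (24 << 7),
    // i.e. 0xC01 (assuming the usual ARM scheme of an 8-bit value rotated
    // right by an even amount).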
2020     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2021   }
2022 
2023   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2024     assert(N == 1 && "Invalid number of operands!");
2025     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2026     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2027     Inst.addOperand(MCOperand::createImm(Enc));
2028   }
2029 
2030   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2031     assert(N == 1 && "Invalid number of operands!");
2032     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2033     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2034     Inst.addOperand(MCOperand::createImm(Enc));
2035   }
2036 
2037   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2038     assert(N == 1 && "Invalid number of operands!");
2039     // Munge the lsb/width into a bitfield mask.
2040     unsigned lsb = Bitfield.LSB;
2041     unsigned width = Bitfield.Width;
2042     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
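    // e.g. lsb == 8, width == 8: ((0xffffffff >> 8) << 24) >> 16 == 0x0000ff00,
    // so the mask emitted below is 0xffff00ff.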
2043     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2044                       (32 - (lsb + width)));
2045     Inst.addOperand(MCOperand::createImm(Mask));
2046   }
2047 
2048   void addImmOperands(MCInst &Inst, unsigned N) const {
2049     assert(N == 1 && "Invalid number of operands!");
2050     addExpr(Inst, getImm());
2051   }
2052 
2053   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2054     assert(N == 1 && "Invalid number of operands!");
2055     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2056     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2057   }
2058 
2059   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2060     assert(N == 1 && "Invalid number of operands!");
2061     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2062     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2063   }
2064 
2065   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2066     assert(N == 1 && "Invalid number of operands!");
2067     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2068     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2069     Inst.addOperand(MCOperand::createImm(Val));
2070   }
2071 
2072   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2073     assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instructions don't encode operands that way yet.
2076     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2077     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2078   }
2079 
2080   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2081     assert(N == 1 && "Invalid number of operands!");
2082     // The immediate is scaled by four in the encoding and is stored
2083     // in the MCInst as such. Lop off the low two bits here.
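    // e.g. a source value of #16 is stored in the MCInst as 4.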
2084     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2085     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2086   }
2087 
2088   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2089     assert(N == 1 && "Invalid number of operands!");
2090     // The immediate is scaled by four in the encoding and is stored
2091     // in the MCInst as such. Lop off the low two bits here.
2092     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2093     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2094   }
2095 
2096   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2097     assert(N == 1 && "Invalid number of operands!");
2098     // The immediate is scaled by four in the encoding and is stored
2099     // in the MCInst as such. Lop off the low two bits here.
2100     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2101     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2102   }
2103 
2104   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2105     assert(N == 1 && "Invalid number of operands!");
2106     // The constant encodes as the immediate-1, and we store in the instruction
2107     // the bits as encoded, so subtract off one here.
2108     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2109     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2110   }
2111 
2112   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2113     assert(N == 1 && "Invalid number of operands!");
2114     // The constant encodes as the immediate-1, and we store in the instruction
2115     // the bits as encoded, so subtract off one here.
2116     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2117     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2118   }
2119 
2120   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2121     assert(N == 1 && "Invalid number of operands!");
2122     // The constant encodes as the immediate, except for 32, which encodes as
2123     // zero.
2124     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2125     unsigned Imm = CE->getValue();
2126     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2127   }
2128 
2129   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2130     assert(N == 1 && "Invalid number of operands!");
2131     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2132     // the instruction as well.
2133     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2134     int Val = CE->getValue();
2135     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2136   }
2137 
2138   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2139     assert(N == 1 && "Invalid number of operands!");
2140     // The operand is actually a t2_so_imm, but we have its bitwise
2141     // negation in the assembly source, so twiddle it here.
2142     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2143     Inst.addOperand(MCOperand::createImm(~CE->getValue()));
2144   }
2145 
2146   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2147     assert(N == 1 && "Invalid number of operands!");
2148     // The operand is actually a t2_so_imm, but we have its
2149     // negation in the assembly source, so twiddle it here.
2150     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2151     Inst.addOperand(MCOperand::createImm(-CE->getValue()));
2152   }
2153 
2154   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2155     assert(N == 1 && "Invalid number of operands!");
2156     // The operand is actually an imm0_4095, but we have its
2157     // negation in the assembly source, so twiddle it here.
2158     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2159     Inst.addOperand(MCOperand::createImm(-CE->getValue()));
2160   }
2161 
2162   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2164       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2165       return;
2166     }
2167 
2168     const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2169     assert(SR && "Unknown value type!");
2170     Inst.addOperand(MCOperand::createExpr(SR));
2171   }
2172 
2173   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2174     assert(N == 1 && "Invalid number of operands!");
2175     if (isImm()) {
2176       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2177       if (CE) {
2178         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2179         return;
2180       }
2181 
2182       const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2183 
2184       assert(SR && "Unknown value type!");
2185       Inst.addOperand(MCOperand::createExpr(SR));
2186       return;
2187     }
2188 
    assert(isMem() && "Unknown value type!");
2190     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2191     Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2192   }
2193 
2194   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2195     assert(N == 1 && "Invalid number of operands!");
2196     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2197   }
2198 
2199   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2200     assert(N == 1 && "Invalid number of operands!");
2201     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2202   }
2203 
2204   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2205     assert(N == 1 && "Invalid number of operands!");
2206     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2207   }
2208 
2209   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2210     assert(N == 1 && "Invalid number of operands!");
2211     int32_t Imm = Memory.OffsetImm->getValue();
2212     Inst.addOperand(MCOperand::createImm(Imm));
2213   }
2214 
2215   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2216     assert(N == 1 && "Invalid number of operands!");
2217     assert(isImm() && "Not an immediate!");
2218 
2219     // If we have an immediate that's not a constant, treat it as a label
2220     // reference needing a fixup.
2221     if (!isa<MCConstantExpr>(getImm())) {
2222       Inst.addOperand(MCOperand::createExpr(getImm()));
2223       return;
2224     }
2225 
2226     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2227     int Val = CE->getValue();
2228     Inst.addOperand(MCOperand::createImm(Val));
2229   }
2230 
2231   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2232     assert(N == 2 && "Invalid number of operands!");
2233     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2234     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2235   }
2236 
2237   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2238     addAlignedMemoryOperands(Inst, N);
2239   }
2240 
2241   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2242     addAlignedMemoryOperands(Inst, N);
2243   }
2244 
2245   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2246     addAlignedMemoryOperands(Inst, N);
2247   }
2248 
2249   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2250     addAlignedMemoryOperands(Inst, N);
2251   }
2252 
2253   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2254     addAlignedMemoryOperands(Inst, N);
2255   }
2256 
2257   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2258     addAlignedMemoryOperands(Inst, N);
2259   }
2260 
2261   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2262     addAlignedMemoryOperands(Inst, N);
2263   }
2264 
2265   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2266     addAlignedMemoryOperands(Inst, N);
2267   }
2268 
2269   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2270     addAlignedMemoryOperands(Inst, N);
2271   }
2272 
2273   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2274     addAlignedMemoryOperands(Inst, N);
2275   }
2276 
2277   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2278     addAlignedMemoryOperands(Inst, N);
2279   }
2280 
2281   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2282     assert(N == 3 && "Invalid number of operands!");
2283     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2284     if (!Memory.OffsetRegNum) {
2285       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2286       // Special case for #-0
2287       if (Val == INT32_MIN) Val = 0;
2288       if (Val < 0) Val = -Val;
2289       Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2290     } else {
2291       // For register offset, we encode the shift type and negation flag
2292       // here.
2293       Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2294                               Memory.ShiftImm, Memory.ShiftType);
2295     }
2296     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2297     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2298     Inst.addOperand(MCOperand::createImm(Val));
2299   }
2300 
2301   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2302     assert(N == 2 && "Invalid number of operands!");
2303     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2304     assert(CE && "non-constant AM2OffsetImm operand!");
2305     int32_t Val = CE->getValue();
2306     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2307     // Special case for #-0
2308     if (Val == INT32_MIN) Val = 0;
2309     if (Val < 0) Val = -Val;
2310     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2311     Inst.addOperand(MCOperand::createReg(0));
2312     Inst.addOperand(MCOperand::createImm(Val));
2313   }
2314 
2315   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2316     assert(N == 3 && "Invalid number of operands!");
2317     // If we have an immediate that's not a constant, treat it as a label
2318     // reference needing a fixup. If it is a constant, it's something else
2319     // and we reject it.
2320     if (isImm()) {
2321       Inst.addOperand(MCOperand::createExpr(getImm()));
2322       Inst.addOperand(MCOperand::createReg(0));
2323       Inst.addOperand(MCOperand::createImm(0));
2324       return;
2325     }
2326 
2327     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2328     if (!Memory.OffsetRegNum) {
2329       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2330       // Special case for #-0
2331       if (Val == INT32_MIN) Val = 0;
2332       if (Val < 0) Val = -Val;
2333       Val = ARM_AM::getAM3Opc(AddSub, Val);
2334     } else {
2335       // For register offset, we encode the shift type and negation flag
2336       // here.
2337       Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2338     }
2339     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2340     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2341     Inst.addOperand(MCOperand::createImm(Val));
2342   }
2343 
2344   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2345     assert(N == 2 && "Invalid number of operands!");
2346     if (Kind == k_PostIndexRegister) {
2347       int32_t Val =
2348         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2349       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2350       Inst.addOperand(MCOperand::createImm(Val));
2351       return;
2352     }
2353 
2354     // Constant offset.
2355     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2356     int32_t Val = CE->getValue();
2357     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2358     // Special case for #-0
2359     if (Val == INT32_MIN) Val = 0;
2360     if (Val < 0) Val = -Val;
2361     Val = ARM_AM::getAM3Opc(AddSub, Val);
2362     Inst.addOperand(MCOperand::createReg(0));
2363     Inst.addOperand(MCOperand::createImm(Val));
2364   }
2365 
2366   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2367     assert(N == 2 && "Invalid number of operands!");
2368     // If we have an immediate that's not a constant, treat it as a label
2369     // reference needing a fixup. If it is a constant, it's something else
2370     // and we reject it.
2371     if (isImm()) {
2372       Inst.addOperand(MCOperand::createExpr(getImm()));
2373       Inst.addOperand(MCOperand::createImm(0));
2374       return;
2375     }
2376 
2377     // The lower two bits are always zero and as such are not encoded.
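    // e.g. an offset of #-16 becomes a word count of 4 with the sub flag set;
    // getAM5Opc packs the two together (see ARMAddressingModes.h for the
    // exact bit layout).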
2378     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2379     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2380     // Special case for #-0
2381     if (Val == INT32_MIN) Val = 0;
2382     if (Val < 0) Val = -Val;
2383     Val = ARM_AM::getAM5Opc(AddSub, Val);
2384     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2385     Inst.addOperand(MCOperand::createImm(Val));
2386   }
2387 
2388   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2389     assert(N == 2 && "Invalid number of operands!");
2390     // If we have an immediate that's not a constant, treat it as a label
2391     // reference needing a fixup. If it is a constant, it's something else
2392     // and we reject it.
2393     if (isImm()) {
2394       Inst.addOperand(MCOperand::createExpr(getImm()));
2395       Inst.addOperand(MCOperand::createImm(0));
2396       return;
2397     }
2398 
2399     // The lower bit is always zero and as such is not encoded.
2400     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2401     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2402     // Special case for #-0
2403     if (Val == INT32_MIN) Val = 0;
2404     if (Val < 0) Val = -Val;
2405     Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2406     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2407     Inst.addOperand(MCOperand::createImm(Val));
2408   }
2409 
2410   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2411     assert(N == 2 && "Invalid number of operands!");
2412     // If we have an immediate that's not a constant, treat it as a label
2413     // reference needing a fixup. If it is a constant, it's something else
2414     // and we reject it.
2415     if (isImm()) {
2416       Inst.addOperand(MCOperand::createExpr(getImm()));
2417       Inst.addOperand(MCOperand::createImm(0));
2418       return;
2419     }
2420 
2421     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2422     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2423     Inst.addOperand(MCOperand::createImm(Val));
2424   }
2425 
2426   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2427     assert(N == 2 && "Invalid number of operands!");
2428     // The lower two bits are always zero and as such are not encoded.
2429     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2430     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2431     Inst.addOperand(MCOperand::createImm(Val));
2432   }
2433 
2434   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2435     assert(N == 2 && "Invalid number of operands!");
2436     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2437     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2438     Inst.addOperand(MCOperand::createImm(Val));
2439   }
2440 
2441   void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2442     addMemImm8OffsetOperands(Inst, N);
2443   }
2444 
2445   void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2446     addMemImm8OffsetOperands(Inst, N);
2447   }
2448 
2449   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2450     assert(N == 2 && "Invalid number of operands!");
2451     // If this is an immediate, it's a label reference.
2452     if (isImm()) {
2453       addExpr(Inst, getImm());
2454       Inst.addOperand(MCOperand::createImm(0));
2455       return;
2456     }
2457 
2458     // Otherwise, it's a normal memory reg+offset.
2459     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2460     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2461     Inst.addOperand(MCOperand::createImm(Val));
2462   }
2463 
2464   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2465     assert(N == 2 && "Invalid number of operands!");
2466     // If this is an immediate, it's a label reference.
2467     if (isImm()) {
2468       addExpr(Inst, getImm());
2469       Inst.addOperand(MCOperand::createImm(0));
2470       return;
2471     }
2472 
2473     // Otherwise, it's a normal memory reg+offset.
2474     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2475     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2476     Inst.addOperand(MCOperand::createImm(Val));
2477   }
2478 
2479   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2480     assert(N == 1 && "Invalid number of operands!");
    // This is the container for the immediate that we will create the
    // constant pool from.
    addExpr(Inst, getConstantPoolImm());
2485   }
2486 
2487   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2488     assert(N == 2 && "Invalid number of operands!");
2489     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2490     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2491   }
2492 
2493   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2494     assert(N == 2 && "Invalid number of operands!");
2495     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2496     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2497   }
2498 
2499   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2500     assert(N == 3 && "Invalid number of operands!");
2501     unsigned Val =
2502       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2503                         Memory.ShiftImm, Memory.ShiftType);
2504     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2505     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2506     Inst.addOperand(MCOperand::createImm(Val));
2507   }
2508 
2509   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2510     assert(N == 3 && "Invalid number of operands!");
2511     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2512     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2513     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2514   }
2515 
2516   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2517     assert(N == 2 && "Invalid number of operands!");
2518     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2519     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2520   }
2521 
2522   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2523     assert(N == 2 && "Invalid number of operands!");
2524     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2525     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2526     Inst.addOperand(MCOperand::createImm(Val));
2527   }
2528 
2529   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2530     assert(N == 2 && "Invalid number of operands!");
2531     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2532     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2533     Inst.addOperand(MCOperand::createImm(Val));
2534   }
2535 
2536   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2537     assert(N == 2 && "Invalid number of operands!");
2538     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2539     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2540     Inst.addOperand(MCOperand::createImm(Val));
2541   }
2542 
2543   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2544     assert(N == 2 && "Invalid number of operands!");
2545     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2546     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2547     Inst.addOperand(MCOperand::createImm(Val));
2548   }
2549 
2550   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2551     assert(N == 1 && "Invalid number of operands!");
2552     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2553     assert(CE && "non-constant post-idx-imm8 operand!");
2554     int Imm = CE->getValue();
2555     bool isAdd = Imm >= 0;
2556     if (Imm == INT32_MIN) Imm = 0;
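    // Pack |Imm| into the low 8 bits with the add/sub flag at bit 8;
    // e.g. #4 becomes 0x104 and #-4 becomes 0x004.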
2557     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2558     Inst.addOperand(MCOperand::createImm(Imm));
2559   }
2560 
2561   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2562     assert(N == 1 && "Invalid number of operands!");
2563     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2564     assert(CE && "non-constant post-idx-imm8s4 operand!");
2565     int Imm = CE->getValue();
2566     bool isAdd = Imm >= 0;
2567     if (Imm == INT32_MIN) Imm = 0;
2568     // Immediate is scaled by 4.
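    // e.g. #8 becomes (8 / 4) | (1 << 8) == 0x102 and #-8 becomes 0x002.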
2569     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2570     Inst.addOperand(MCOperand::createImm(Imm));
2571   }
2572 
2573   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2574     assert(N == 2 && "Invalid number of operands!");
2575     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2576     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2577   }
2578 
2579   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2580     assert(N == 2 && "Invalid number of operands!");
2581     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2582     // The sign, shift type, and shift amount are encoded in a single operand
2583     // using the AM2 encoding helpers.
2584     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2585     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2586                                      PostIdxReg.ShiftTy);
2587     Inst.addOperand(MCOperand::createImm(Imm));
2588   }
2589 
2590   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2591     assert(N == 1 && "Invalid number of operands!");
2592     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2593   }
2594 
2595   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2596     assert(N == 1 && "Invalid number of operands!");
2597     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2598   }
2599 
2600   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2601     assert(N == 1 && "Invalid number of operands!");
2602     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2603   }
2604 
2605   void addVecListOperands(MCInst &Inst, unsigned N) const {
2606     assert(N == 1 && "Invalid number of operands!");
2607     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2608   }
2609 
2610   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2611     assert(N == 2 && "Invalid number of operands!");
2612     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2613     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2614   }
2615 
2616   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2617     assert(N == 1 && "Invalid number of operands!");
2618     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2619   }
2620 
2621   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2622     assert(N == 1 && "Invalid number of operands!");
2623     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2624   }
2625 
2626   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2627     assert(N == 1 && "Invalid number of operands!");
2628     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2629   }
2630 
2631   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2632     assert(N == 1 && "Invalid number of operands!");
2633     // The immediate encodes the type of constant as well as the value.
2634     // Mask in that this is an i8 splat.
2635     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2636     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2637   }
2638 
2639   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2640     assert(N == 1 && "Invalid number of operands!");
2641     // The immediate encodes the type of constant as well as the value.
2642     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2643     unsigned Value = CE->getValue();
2644     Value = ARM_AM::encodeNEONi16splat(Value);
2645     Inst.addOperand(MCOperand::createImm(Value));
2646   }
2647 
2648   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2649     assert(N == 1 && "Invalid number of operands!");
2650     // The immediate encodes the type of constant as well as the value.
2651     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2652     unsigned Value = CE->getValue();
2653     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2654     Inst.addOperand(MCOperand::createImm(Value));
2655   }
2656 
2657   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2658     assert(N == 1 && "Invalid number of operands!");
2659     // The immediate encodes the type of constant as well as the value.
2660     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2661     unsigned Value = CE->getValue();
2662     Value = ARM_AM::encodeNEONi32splat(Value);
2663     Inst.addOperand(MCOperand::createImm(Value));
2664   }
2665 
2666   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2667     assert(N == 1 && "Invalid number of operands!");
2668     // The immediate encodes the type of constant as well as the value.
2669     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2670     unsigned Value = CE->getValue();
2671     Value = ARM_AM::encodeNEONi32splat(~Value);
2672     Inst.addOperand(MCOperand::createImm(Value));
2673   }
2674 
2675   void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2676     assert(N == 1 && "Invalid number of operands!");
2677     // The immediate encodes the type of constant as well as the value.
2678     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2679     unsigned Value = CE->getValue();
2680     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2681             Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All vmvn instructions that want to replicate a non-zero byte "
           "must be replaced with VMOVv8i8 or VMOVv16i8.");
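    // Emit the inverted byte so the VMVN can be encoded as a plain VMOV;
    // e.g. a source constant of 0x2a2a2a2a is emitted as
    // ((~0x2a) & 0xff) | 0xe00 == 0xed5.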
2684     unsigned B = ((~Value) & 0xff);
2685     B |= 0xe00; // cmode = 0b1110
2686     Inst.addOperand(MCOperand::createImm(B));
2687   }
2688   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2689     assert(N == 1 && "Invalid number of operands!");
2690     // The immediate encodes the type of constant as well as the value.
2691     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2692     unsigned Value = CE->getValue();
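    // The bits ORed in below select which byte of the i32 holds the value and
    // whether the lower bits are all-ones; e.g. 0x1200 becomes
    // (0x1200 >> 8) | 0x200 == 0x212, while 0x12ff becomes 0xc12.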
2693     if (Value >= 256 && Value <= 0xffff)
2694       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2695     else if (Value > 0xffff && Value <= 0xffffff)
2696       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2697     else if (Value > 0xffffff)
2698       Value = (Value >> 24) | 0x600;
2699     Inst.addOperand(MCOperand::createImm(Value));
2700   }
2701 
2702   void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2703     assert(N == 1 && "Invalid number of operands!");
2704     // The immediate encodes the type of constant as well as the value.
2705     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2706     unsigned Value = CE->getValue();
2707     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2708             Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All instructions that want to replicate a non-zero byte "
           "must be replaced with VMOVv8i8 or VMOVv16i8.");
2711     unsigned B = Value & 0xff;
2712     B |= 0xe00; // cmode = 0b1110
2713     Inst.addOperand(MCOperand::createImm(B));
2714   }
2715   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2716     assert(N == 1 && "Invalid number of operands!");
2717     // The immediate encodes the type of constant as well as the value.
2718     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2719     unsigned Value = ~CE->getValue();
2720     if (Value >= 256 && Value <= 0xffff)
2721       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2722     else if (Value > 0xffff && Value <= 0xffffff)
2723       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2724     else if (Value > 0xffffff)
2725       Value = (Value >> 24) | 0x600;
2726     Inst.addOperand(MCOperand::createImm(Value));
2727   }
2728 
2729   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2730     assert(N == 1 && "Invalid number of operands!");
2731     // The immediate encodes the type of constant as well as the value.
2732     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2733     uint64_t Value = CE->getValue();
2734     unsigned Imm = 0;
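    // Each byte of the i64 is either 0x00 or 0xff; collapse each byte into one
    // bit of the 8-bit immediate, e.g. 0x00ff00ff00ff00ff yields Imm == 0x55.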
2735     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2736       Imm |= (Value & 1) << i;
2737     }
2738     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2739   }
2740 
2741   void print(raw_ostream &OS) const override;
2742 
2743   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2744     auto Op = make_unique<ARMOperand>(k_ITCondMask);
2745     Op->ITMask.Mask = Mask;
2746     Op->StartLoc = S;
2747     Op->EndLoc = S;
2748     return Op;
2749   }
2750 
2751   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2752                                                     SMLoc S) {
2753     auto Op = make_unique<ARMOperand>(k_CondCode);
2754     Op->CC.Val = CC;
2755     Op->StartLoc = S;
2756     Op->EndLoc = S;
2757     return Op;
2758   }
2759 
2760   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2761     auto Op = make_unique<ARMOperand>(k_CoprocNum);
2762     Op->Cop.Val = CopVal;
2763     Op->StartLoc = S;
2764     Op->EndLoc = S;
2765     return Op;
2766   }
2767 
2768   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2769     auto Op = make_unique<ARMOperand>(k_CoprocReg);
2770     Op->Cop.Val = CopVal;
2771     Op->StartLoc = S;
2772     Op->EndLoc = S;
2773     return Op;
2774   }
2775 
2776   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2777                                                         SMLoc E) {
2778     auto Op = make_unique<ARMOperand>(k_CoprocOption);
2779     Op->Cop.Val = Val;
2780     Op->StartLoc = S;
2781     Op->EndLoc = E;
2782     return Op;
2783   }
2784 
2785   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2786     auto Op = make_unique<ARMOperand>(k_CCOut);
2787     Op->Reg.RegNum = RegNum;
2788     Op->StartLoc = S;
2789     Op->EndLoc = S;
2790     return Op;
2791   }
2792 
2793   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2794     auto Op = make_unique<ARMOperand>(k_Token);
2795     Op->Tok.Data = Str.data();
2796     Op->Tok.Length = Str.size();
2797     Op->StartLoc = S;
2798     Op->EndLoc = S;
2799     return Op;
2800   }
2801 
2802   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2803                                                SMLoc E) {
2804     auto Op = make_unique<ARMOperand>(k_Register);
2805     Op->Reg.RegNum = RegNum;
2806     Op->StartLoc = S;
2807     Op->EndLoc = E;
2808     return Op;
2809   }
2810 
2811   static std::unique_ptr<ARMOperand>
2812   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2813                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2814                         SMLoc E) {
2815     auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2816     Op->RegShiftedReg.ShiftTy = ShTy;
2817     Op->RegShiftedReg.SrcReg = SrcReg;
2818     Op->RegShiftedReg.ShiftReg = ShiftReg;
2819     Op->RegShiftedReg.ShiftImm = ShiftImm;
2820     Op->StartLoc = S;
2821     Op->EndLoc = E;
2822     return Op;
2823   }
2824 
2825   static std::unique_ptr<ARMOperand>
2826   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2827                          unsigned ShiftImm, SMLoc S, SMLoc E) {
2828     auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2829     Op->RegShiftedImm.ShiftTy = ShTy;
2830     Op->RegShiftedImm.SrcReg = SrcReg;
2831     Op->RegShiftedImm.ShiftImm = ShiftImm;
2832     Op->StartLoc = S;
2833     Op->EndLoc = E;
2834     return Op;
2835   }
2836 
2837   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2838                                                       SMLoc S, SMLoc E) {
2839     auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2840     Op->ShifterImm.isASR = isASR;
2841     Op->ShifterImm.Imm = Imm;
2842     Op->StartLoc = S;
2843     Op->EndLoc = E;
2844     return Op;
2845   }
2846 
2847   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2848                                                   SMLoc E) {
2849     auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2850     Op->RotImm.Imm = Imm;
2851     Op->StartLoc = S;
2852     Op->EndLoc = E;
2853     return Op;
2854   }
2855 
2856   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2857                                                   SMLoc S, SMLoc E) {
2858     auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2859     Op->ModImm.Bits = Bits;
2860     Op->ModImm.Rot = Rot;
2861     Op->StartLoc = S;
2862     Op->EndLoc = E;
2863     return Op;
2864   }
2865 
2866   static std::unique_ptr<ARMOperand>
2867   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2868     auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
2869     Op->Imm.Val = Val;
2870     Op->StartLoc = S;
2871     Op->EndLoc = E;
2872     return Op;
2873   }
2874 
2875   static std::unique_ptr<ARMOperand>
2876   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2877     auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2878     Op->Bitfield.LSB = LSB;
2879     Op->Bitfield.Width = Width;
2880     Op->StartLoc = S;
2881     Op->EndLoc = E;
2882     return Op;
2883   }
2884 
2885   static std::unique_ptr<ARMOperand>
2886   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2887                 SMLoc StartLoc, SMLoc EndLoc) {
2888     assert(Regs.size() > 0 && "RegList contains no registers?");
2889     KindTy Kind = k_RegisterList;
2890 
2891     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2892       Kind = k_DPRRegisterList;
2893     else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2894              contains(Regs.front().second))
2895       Kind = k_SPRRegisterList;
2896 
2897     // Sort based on the register encoding values.
2898     array_pod_sort(Regs.begin(), Regs.end());
2899 
2900     auto Op = make_unique<ARMOperand>(Kind);
2901     for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
2902            I = Regs.begin(), E = Regs.end(); I != E; ++I)
2903       Op->Registers.push_back(I->second);
2904     Op->StartLoc = StartLoc;
2905     Op->EndLoc = EndLoc;
2906     return Op;
2907   }
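  // Usage sketch for CreateRegList (hypothetical input, for illustration):
  // parsing "{r0, r2, r1}" yields the (encoding, register) pairs
  // {(0, R0), (2, R2), (1, R1)}; the array_pod_sort above orders them by
  // encoding, so the operand always stores R0, R1, R2.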
2908 
2909   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2910                                                       unsigned Count,
2911                                                       bool isDoubleSpaced,
2912                                                       SMLoc S, SMLoc E) {
2913     auto Op = make_unique<ARMOperand>(k_VectorList);
2914     Op->VectorList.RegNum = RegNum;
2915     Op->VectorList.Count = Count;
2916     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2917     Op->StartLoc = S;
2918     Op->EndLoc = E;
2919     return Op;
2920   }
2921 
2922   static std::unique_ptr<ARMOperand>
2923   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2924                            SMLoc S, SMLoc E) {
2925     auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2926     Op->VectorList.RegNum = RegNum;
2927     Op->VectorList.Count = Count;
2928     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2929     Op->StartLoc = S;
2930     Op->EndLoc = E;
2931     return Op;
2932   }
2933 
2934   static std::unique_ptr<ARMOperand>
2935   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2936                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
2937     auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2938     Op->VectorList.RegNum = RegNum;
2939     Op->VectorList.Count = Count;
2940     Op->VectorList.LaneIndex = Index;
2941     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2942     Op->StartLoc = S;
2943     Op->EndLoc = E;
2944     return Op;
2945   }
2946 
2947   static std::unique_ptr<ARMOperand>
2948   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2949     auto Op = make_unique<ARMOperand>(k_VectorIndex);
2950     Op->VectorIndex.Val = Idx;
2951     Op->StartLoc = S;
2952     Op->EndLoc = E;
2953     return Op;
2954   }
2955 
2956   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
2957                                                SMLoc E) {
2958     auto Op = make_unique<ARMOperand>(k_Immediate);
2959     Op->Imm.Val = Val;
2960     Op->StartLoc = S;
2961     Op->EndLoc = E;
2962     return Op;
2963   }
2964 
2965   static std::unique_ptr<ARMOperand>
2966   CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
2967             unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
2968             unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
2969             SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
2970     auto Op = make_unique<ARMOperand>(k_Memory);
2971     Op->Memory.BaseRegNum = BaseRegNum;
2972     Op->Memory.OffsetImm = OffsetImm;
2973     Op->Memory.OffsetRegNum = OffsetRegNum;
2974     Op->Memory.ShiftType = ShiftType;
2975     Op->Memory.ShiftImm = ShiftImm;
2976     Op->Memory.Alignment = Alignment;
2977     Op->Memory.isNegative = isNegative;
2978     Op->StartLoc = S;
2979     Op->EndLoc = E;
2980     Op->AlignmentLoc = AlignmentLoc;
2981     return Op;
2982   }
2983 
2984   static std::unique_ptr<ARMOperand>
2985   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
2986                    unsigned ShiftImm, SMLoc S, SMLoc E) {
2987     auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
2988     Op->PostIdxReg.RegNum = RegNum;
2989     Op->PostIdxReg.isAdd = isAdd;
2990     Op->PostIdxReg.ShiftTy = ShiftTy;
2991     Op->PostIdxReg.ShiftImm = ShiftImm;
2992     Op->StartLoc = S;
2993     Op->EndLoc = E;
2994     return Op;
2995   }
2996 
2997   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
2998                                                          SMLoc S) {
2999     auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3000     Op->MBOpt.Val = Opt;
3001     Op->StartLoc = S;
3002     Op->EndLoc = S;
3003     return Op;
3004   }
3005 
3006   static std::unique_ptr<ARMOperand>
3007   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3008     auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3009     Op->ISBOpt.Val = Opt;
3010     Op->StartLoc = S;
3011     Op->EndLoc = S;
3012     return Op;
3013   }
3014 
3015   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3016                                                       SMLoc S) {
3017     auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3018     Op->IFlags.Val = IFlags;
3019     Op->StartLoc = S;
3020     Op->EndLoc = S;
3021     return Op;
3022   }
3023 
3024   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3025     auto Op = make_unique<ARMOperand>(k_MSRMask);
3026     Op->MMask.Val = MMask;
3027     Op->StartLoc = S;
3028     Op->EndLoc = S;
3029     return Op;
3030   }
3031 
3032   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3033     auto Op = make_unique<ARMOperand>(k_BankedReg);
3034     Op->BankedReg.Val = Reg;
3035     Op->StartLoc = S;
3036     Op->EndLoc = S;
3037     return Op;
3038   }
3039 };
3040 
3041 } // end anonymous namespace.
3042 
3043 void ARMOperand::print(raw_ostream &OS) const {
3044   switch (Kind) {
3045   case k_CondCode:
3046     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3047     break;
3048   case k_CCOut:
3049     OS << "<ccout " << getReg() << ">";
3050     break;
3051   case k_ITCondMask: {
3052     static const char *const MaskStr[] = {
3053       "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
3054       "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
3055     };
3056     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3057     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3058     break;
3059   }
3060   case k_CoprocNum:
3061     OS << "<coprocessor number: " << getCoproc() << ">";
3062     break;
3063   case k_CoprocReg:
3064     OS << "<coprocessor register: " << getCoproc() << ">";
3065     break;
3066   case k_CoprocOption:
3067     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3068     break;
3069   case k_MSRMask:
3070     OS << "<mask: " << getMSRMask() << ">";
3071     break;
3072   case k_BankedReg:
3073     OS << "<banked reg: " << getBankedReg() << ">";
3074     break;
3075   case k_Immediate:
3076     OS << *getImm();
3077     break;
3078   case k_MemBarrierOpt:
3079     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3080     break;
3081   case k_InstSyncBarrierOpt:
3082     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3083     break;
3084   case k_Memory:
3085     OS << "<memory "
3086        << "base:" << Memory.BaseRegNum;
3087     OS << ">";
3088     break;
3089   case k_PostIndexRegister:
3090     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3091        << PostIdxReg.RegNum;
3092     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3093       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3094          << PostIdxReg.ShiftImm;
3095     OS << ">";
3096     break;
3097   case k_ProcIFlags: {
3098     OS << "<ARM_PROC::";
3099     unsigned IFlags = getProcIFlags();
3100     for (int i=2; i >= 0; --i)
3101       if (IFlags & (1 << i))
3102         OS << ARM_PROC::IFlagsToString(1 << i);
3103     OS << ">";
3104     break;
3105   }
3106   case k_Register:
3107     OS << "<register " << getReg() << ">";
3108     break;
3109   case k_ShifterImmediate:
3110     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3111        << " #" << ShifterImm.Imm << ">";
3112     break;
3113   case k_ShiftedRegister:
3114     OS << "<so_reg_reg "
3115        << RegShiftedReg.SrcReg << " "
3116        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
3117        << " " << RegShiftedReg.ShiftReg << ">";
3118     break;
3119   case k_ShiftedImmediate:
3120     OS << "<so_reg_imm "
3121        << RegShiftedImm.SrcReg << " "
3122        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
3123        << " #" << RegShiftedImm.ShiftImm << ">";
3124     break;
3125   case k_RotateImmediate:
3126     OS << "<ror #" << (RotImm.Imm * 8) << ">";
3127     break;
3128   case k_ModifiedImmediate:
3129     OS << "<mod_imm #" << ModImm.Bits << ", #"
3130        << ModImm.Rot << ">";
3131     break;
3132   case k_ConstantPoolImmediate:
3133     OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
3134     break;
3135   case k_BitfieldDescriptor:
3136     OS << "<bitfield " << "lsb: " << Bitfield.LSB
3137        << ", width: " << Bitfield.Width << ">";
3138     break;
3139   case k_RegisterList:
3140   case k_DPRRegisterList:
3141   case k_SPRRegisterList: {
3142     OS << "<register_list ";
3143 
3144     const SmallVectorImpl<unsigned> &RegList = getRegList();
3145     for (SmallVectorImpl<unsigned>::const_iterator
3146            I = RegList.begin(), E = RegList.end(); I != E; ) {
3147       OS << *I;
3148       if (++I < E) OS << ", ";
3149     }
3150 
3151     OS << ">";
3152     break;
3153   }
3154   case k_VectorList:
3155     OS << "<vector_list " << VectorList.Count << " * "
3156        << VectorList.RegNum << ">";
3157     break;
3158   case k_VectorListAllLanes:
3159     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3160        << VectorList.RegNum << ">";
3161     break;
3162   case k_VectorListIndexed:
3163     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3164        << VectorList.Count << " * " << VectorList.RegNum << ">";
3165     break;
3166   case k_Token:
3167     OS << "'" << getToken() << "'";
3168     break;
3169   case k_VectorIndex:
3170     OS << "<vectorindex " << getVectorIndex() << ">";
3171     break;
3172   }
3173 }
3174 
3175 /// @name Auto-generated Match Functions
3176 /// {
3177 
3178 static unsigned MatchRegisterName(StringRef Name);
3179 
3180 /// }
3181 
3182 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3183                                  SMLoc &StartLoc, SMLoc &EndLoc) {
3184   const AsmToken &Tok = getParser().getTok();
3185   StartLoc = Tok.getLoc();
3186   EndLoc = Tok.getEndLoc();
3187   RegNo = tryParseRegister();
3188 
3189   return (RegNo == (unsigned)-1);
3190 }
3191 
3192 /// Try to parse a register name.  The token must be an Identifier when called,
3193 /// and if it is a register name the token is eaten and the register number is
3194 /// returned.  Otherwise return -1.
3195 ///
3196 int ARMAsmParser::tryParseRegister() {
3197   MCAsmParser &Parser = getParser();
3198   const AsmToken &Tok = Parser.getTok();
3199   if (Tok.isNot(AsmToken::Identifier)) return -1;
3200 
3201   std::string lowerCase = Tok.getString().lower();
3202   unsigned RegNum = MatchRegisterName(lowerCase);
3203   if (!RegNum) {
3204     RegNum = StringSwitch<unsigned>(lowerCase)
3205       .Case("r13", ARM::SP)
3206       .Case("r14", ARM::LR)
3207       .Case("r15", ARM::PC)
3208       .Case("ip", ARM::R12)
3209       // Additional register name aliases for 'gas' compatibility.
3210       .Case("a1", ARM::R0)
3211       .Case("a2", ARM::R1)
3212       .Case("a3", ARM::R2)
3213       .Case("a4", ARM::R3)
3214       .Case("v1", ARM::R4)
3215       .Case("v2", ARM::R5)
3216       .Case("v3", ARM::R6)
3217       .Case("v4", ARM::R7)
3218       .Case("v5", ARM::R8)
3219       .Case("v6", ARM::R9)
3220       .Case("v7", ARM::R10)
3221       .Case("v8", ARM::R11)
3222       .Case("sb", ARM::R9)
3223       .Case("sl", ARM::R10)
3224       .Case("fp", ARM::R11)
3225       .Default(0);
3226   }
3227   if (!RegNum) {
3228     // Check for aliases registered via .req. Canonicalize to lower case.
3229     // That's more consistent since register names are case insensitive, and
3230     // it's how the original entry was passed in from MC/MCParser/AsmParser.
3231     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3232     // If no match, return failure.
3233     if (Entry == RegisterReqs.end())
3234       return -1;
3235     Parser.Lex(); // Eat identifier token.
3236     return Entry->getValue();
3237   }
3238 
3239   // Some FPUs only have 16 D registers, so D16-D31 are invalid
3240   if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3241     return -1;
3242 
3243   Parser.Lex(); // Eat identifier token.
3244 
3245   return RegNum;
3246 }
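// Illustrative behaviour of tryParseRegister: "ip" resolves to ARM::R12 and
// "v1" to ARM::R4 via the gas alias table above, names defined with ".req"
// are resolved through RegisterReqs, and d16-d31 are rejected when only 16
// D registers are available.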
3247 
3248 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3249 // If a recoverable error occurs, return 1. If an irrecoverable error
3250 // occurs, return -1. An irrecoverable error is one where tokens have been
3251 // consumed in the process of trying to parse the shifter (i.e., when it is
3252 // indeed a shifter operand, but malformed).
3253 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3254   MCAsmParser &Parser = getParser();
3255   SMLoc S = Parser.getTok().getLoc();
3256   const AsmToken &Tok = Parser.getTok();
3257   if (Tok.isNot(AsmToken::Identifier))
3258     return -1;
3259 
3260   std::string lowerCase = Tok.getString().lower();
3261   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
3262       .Case("asl", ARM_AM::lsl)
3263       .Case("lsl", ARM_AM::lsl)
3264       .Case("lsr", ARM_AM::lsr)
3265       .Case("asr", ARM_AM::asr)
3266       .Case("ror", ARM_AM::ror)
3267       .Case("rrx", ARM_AM::rrx)
3268       .Default(ARM_AM::no_shift);
3269 
3270   if (ShiftTy == ARM_AM::no_shift)
3271     return 1;
3272 
3273   Parser.Lex(); // Eat the operator.
3274 
3275   // The source register for the shift has already been added to the
3276   // operand list, so we need to pop it off and combine it into the shifted
3277   // register operand instead.
3278   std::unique_ptr<ARMOperand> PrevOp(
3279       (ARMOperand *)Operands.pop_back_val().release());
3280   if (!PrevOp->isReg())
3281     return Error(PrevOp->getStartLoc(), "shift must be of a register");
3282   int SrcReg = PrevOp->getReg();
3283 
3284   SMLoc EndLoc;
3285   int64_t Imm = 0;
3286   int ShiftReg = 0;
3287   if (ShiftTy == ARM_AM::rrx) {
3288     // RRX doesn't have an explicit shift amount. The encoder expects
3289     // the shift register to be the same as the source register. Seems odd,
3290     // but OK.
3291     ShiftReg = SrcReg;
3292   } else {
3293     // Figure out if this is shifted by a constant or a register (for non-RRX).
3294     if (Parser.getTok().is(AsmToken::Hash) ||
3295         Parser.getTok().is(AsmToken::Dollar)) {
3296       Parser.Lex(); // Eat hash.
3297       SMLoc ImmLoc = Parser.getTok().getLoc();
3298       const MCExpr *ShiftExpr = nullptr;
3299       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3300         Error(ImmLoc, "invalid immediate shift value");
3301         return -1;
3302       }
3303       // The expression must be evaluatable as an immediate.
3304       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3305       if (!CE) {
3306         Error(ImmLoc, "invalid immediate shift value");
3307         return -1;
3308       }
3309       // Range check the immediate.
3310       // lsl, ror: 0 <= imm <= 31
3311       // lsr, asr: 0 <= imm <= 32
3312       Imm = CE->getValue();
3313       if (Imm < 0 ||
3314           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3315           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3316         Error(ImmLoc, "immediate shift value out of range");
3317         return -1;
3318       }
3319       // A shift by zero is a nop. Always send it through as lsl.
3320       // ('as' compatibility)
3321       if (Imm == 0)
3322         ShiftTy = ARM_AM::lsl;
3323     } else if (Parser.getTok().is(AsmToken::Identifier)) {
3324       SMLoc L = Parser.getTok().getLoc();
3325       EndLoc = Parser.getTok().getEndLoc();
3326       ShiftReg = tryParseRegister();
3327       if (ShiftReg == -1) {
3328         Error(L, "expected immediate or register in shift operand");
3329         return -1;
3330       }
3331     } else {
3332       Error(Parser.getTok().getLoc(),
3333             "expected immediate or register in shift operand");
3334       return -1;
3335     }
3336   }
3337 
3338   if (ShiftReg && ShiftTy != ARM_AM::rrx)
3339     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3340                                                          ShiftReg, Imm,
3341                                                          S, EndLoc));
3342   else
3343     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3344                                                           S, EndLoc));
3345 
3346   return 0;
3347 }
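// Illustrative note (not part of the parser): for "mov r0, r1, lsl #2" the
// "r1" operand pushed by the caller is popped above and replaced with a
// single shifted-immediate operand (lsl, r1, 2); "mov r0, r1, lsl r2" yields
// a shifted-register operand instead, and "rrx" takes no explicit shift
// amount.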
3348 
3349 
3350 /// Try to parse a register name.  The token must be an Identifier when called.
3351 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3352 /// if there is a "writeback". Returns 'true' if it's not a register.
3353 ///
3354 /// TODO this is likely to change to allow different register types and/or to
3355 /// parse for a specific register type.
3356 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3357   MCAsmParser &Parser = getParser();
3358   const AsmToken &RegTok = Parser.getTok();
3359   int RegNo = tryParseRegister();
3360   if (RegNo == -1)
3361     return true;
3362 
3363   Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
3364                                            RegTok.getEndLoc()));
3365 
3366   const AsmToken &ExclaimTok = Parser.getTok();
3367   if (ExclaimTok.is(AsmToken::Exclaim)) {
3368     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3369                                                ExclaimTok.getLoc()));
3370     Parser.Lex(); // Eat exclaim token
3371     return false;
3372   }
3373 
3374   // Also check for an index operand. This is only legal for vector registers,
3375   // but that'll get caught OK in operand matching, so we don't need to
3376   // explicitly filter everything else out here.
3377   if (Parser.getTok().is(AsmToken::LBrac)) {
3378     SMLoc SIdx = Parser.getTok().getLoc();
3379     Parser.Lex(); // Eat left bracket token.
3380 
3381     const MCExpr *ImmVal;
3382     if (getParser().parseExpression(ImmVal))
3383       return true;
3384     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3385     if (!MCE)
3386       return TokError("immediate value expected for vector index");
3387 
3388     if (Parser.getTok().isNot(AsmToken::RBrac))
3389       return Error(Parser.getTok().getLoc(), "']' expected");
3390 
3391     SMLoc E = Parser.getTok().getEndLoc();
3392     Parser.Lex(); // Eat right bracket token.
3393 
3394     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3395                                                      SIdx, E,
3396                                                      getContext()));
3397   }
3398 
3399   return false;
3400 }
3401 
3402 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3403 /// instruction with a symbolic operand name.
3404 /// We accept "crN" syntax for GAS compatibility.
3405 /// <operand-name> ::= <prefix><number>
3406 /// If CoprocOp is 'c', then:
3407 ///   <prefix> ::= c | cr
3408 /// If CoprocOp is 'p', then :
3409 ///   <prefix> ::= p
3410 /// <number> ::= integer in range [0, 15]
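/// For example, "p15" with CoprocOp 'p' yields 15, and "c8" or "cr8" with
/// CoprocOp 'c' both yield 8.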
3411 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3412   // Use the same layout as the tablegen'erated register name matcher. Ugly,
3413   // but efficient.
3414   if (Name.size() < 2 || Name[0] != CoprocOp)
3415     return -1;
3416   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3417 
3418   switch (Name.size()) {
3419   default: return -1;
3420   case 1:
3421     switch (Name[0]) {
3422     default:  return -1;
3423     case '0': return 0;
3424     case '1': return 1;
3425     case '2': return 2;
3426     case '3': return 3;
3427     case '4': return 4;
3428     case '5': return 5;
3429     case '6': return 6;
3430     case '7': return 7;
3431     case '8': return 8;
3432     case '9': return 9;
3433     }
3434   case 2:
3435     if (Name[0] != '1')
3436       return -1;
3437     switch (Name[1]) {
3438     default:  return -1;
3439     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3440     // However, old cores (v5/v6) did use them in that way.
3441     case '0': return 10;
3442     case '1': return 11;
3443     case '2': return 12;
3444     case '3': return 13;
3445     case '4': return 14;
3446     case '5': return 15;
3447     }
3448   }
3449 }
3450 
3451 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3452 ARMAsmParser::OperandMatchResultTy
3453 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3454   MCAsmParser &Parser = getParser();
3455   SMLoc S = Parser.getTok().getLoc();
3456   const AsmToken &Tok = Parser.getTok();
3457   if (!Tok.is(AsmToken::Identifier))
3458     return MatchOperand_NoMatch;
3459   unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
3460     .Case("eq", ARMCC::EQ)
3461     .Case("ne", ARMCC::NE)
3462     .Case("hs", ARMCC::HS)
3463     .Case("cs", ARMCC::HS)
3464     .Case("lo", ARMCC::LO)
3465     .Case("cc", ARMCC::LO)
3466     .Case("mi", ARMCC::MI)
3467     .Case("pl", ARMCC::PL)
3468     .Case("vs", ARMCC::VS)
3469     .Case("vc", ARMCC::VC)
3470     .Case("hi", ARMCC::HI)
3471     .Case("ls", ARMCC::LS)
3472     .Case("ge", ARMCC::GE)
3473     .Case("lt", ARMCC::LT)
3474     .Case("gt", ARMCC::GT)
3475     .Case("le", ARMCC::LE)
3476     .Case("al", ARMCC::AL)
3477     .Default(~0U);
3478   if (CC == ~0U)
3479     return MatchOperand_NoMatch;
3480   Parser.Lex(); // Eat the token.
3481 
3482   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3483 
3484   return MatchOperand_Success;
3485 }
3486 
3487 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3488 /// token must be an Identifier when called, and if it is a coprocessor
3489 /// number, the token is eaten and the operand is added to the operand list.
3490 ARMAsmParser::OperandMatchResultTy
3491 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3492   MCAsmParser &Parser = getParser();
3493   SMLoc S = Parser.getTok().getLoc();
3494   const AsmToken &Tok = Parser.getTok();
3495   if (Tok.isNot(AsmToken::Identifier))
3496     return MatchOperand_NoMatch;
3497 
3498   int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3499   if (Num == -1)
3500     return MatchOperand_NoMatch;
3501   // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3502   if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3503     return MatchOperand_NoMatch;
3504 
3505   Parser.Lex(); // Eat identifier token.
3506   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3507   return MatchOperand_Success;
3508 }
3509 
3510 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3511 /// token must be an Identifier when called, and if it is a coprocessor
3512 /// register, the token is eaten and the operand is added to the operand list.
3513 ARMAsmParser::OperandMatchResultTy
3514 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3515   MCAsmParser &Parser = getParser();
3516   SMLoc S = Parser.getTok().getLoc();
3517   const AsmToken &Tok = Parser.getTok();
3518   if (Tok.isNot(AsmToken::Identifier))
3519     return MatchOperand_NoMatch;
3520 
3521   int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3522   if (Reg == -1)
3523     return MatchOperand_NoMatch;
3524 
3525   Parser.Lex(); // Eat identifier token.
3526   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3527   return MatchOperand_Success;
3528 }
3529 
3530 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3531 /// coproc_option : '{' imm0_255 '}'
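/// e.g. the trailing "{4}" in "ldc p1, c2, [r3], {4}" (illustrative example
/// syntax).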
3532 ARMAsmParser::OperandMatchResultTy
3533 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3534   MCAsmParser &Parser = getParser();
3535   SMLoc S = Parser.getTok().getLoc();
3536 
3537   // If this isn't a '{', this isn't a coprocessor immediate operand.
3538   if (Parser.getTok().isNot(AsmToken::LCurly))
3539     return MatchOperand_NoMatch;
3540   Parser.Lex(); // Eat the '{'
3541 
3542   const MCExpr *Expr;
3543   SMLoc Loc = Parser.getTok().getLoc();
3544   if (getParser().parseExpression(Expr)) {
3545     Error(Loc, "illegal expression");
3546     return MatchOperand_ParseFail;
3547   }
3548   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3549   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3550     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3551     return MatchOperand_ParseFail;
3552   }
3553   int Val = CE->getValue();
3554 
3555   // Check for and consume the closing '}'
3556   if (Parser.getTok().isNot(AsmToken::RCurly))
3557     return MatchOperand_ParseFail;
3558   SMLoc E = Parser.getTok().getEndLoc();
3559   Parser.Lex(); // Eat the '}'
3560 
3561   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3562   return MatchOperand_Success;
3563 }
3564 
3565 // For register list parsing, we need to map from raw GPR register numbering
3566 // to the enumeration values. The enumeration values aren't sorted by
3567 // register number due to our using "sp", "lr" and "pc" as canonical names.
3568 static unsigned getNextRegister(unsigned Reg) {
3569   // If this is a GPR, we need to do it manually, otherwise we can rely
3570   // on the sort ordering of the enumeration since the other reg-classes
3571   // are sane.
3572   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3573     return Reg + 1;
3574   switch(Reg) {
3575   default: llvm_unreachable("Invalid GPR number!");
3576   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3577   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3578   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3579   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3580   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3581   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3582   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3583   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3584   }
3585 }
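// For example, getNextRegister(ARM::R12) returns ARM::SP and
// getNextRegister(ARM::SP) returns ARM::LR, following the encoding order
// (r12 = 12, sp = 13, lr = 14, pc = 15) rather than the enum order.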
3586 
3587 // Return the low-subreg of a given Q register.
3588 static unsigned getDRegFromQReg(unsigned QReg) {
3589   switch (QReg) {
3590   default: llvm_unreachable("expected a Q register!");
3591   case ARM::Q0:  return ARM::D0;
3592   case ARM::Q1:  return ARM::D2;
3593   case ARM::Q2:  return ARM::D4;
3594   case ARM::Q3:  return ARM::D6;
3595   case ARM::Q4:  return ARM::D8;
3596   case ARM::Q5:  return ARM::D10;
3597   case ARM::Q6:  return ARM::D12;
3598   case ARM::Q7:  return ARM::D14;
3599   case ARM::Q8:  return ARM::D16;
3600   case ARM::Q9:  return ARM::D18;
3601   case ARM::Q10: return ARM::D20;
3602   case ARM::Q11: return ARM::D22;
3603   case ARM::Q12: return ARM::D24;
3604   case ARM::Q13: return ARM::D26;
3605   case ARM::Q14: return ARM::D28;
3606   case ARM::Q15: return ARM::D30;
3607   }
3608 }
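// For example, getDRegFromQReg(ARM::Q3) returns ARM::D6, so a Q3 entry in a
// register list is treated as the D-register pair d6/d7.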
3609 
3610 /// Parse a register list.
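/// Accepts GPR, DPR or SPR lists such as "{r0, r2-r5, lr}" or "{d0-d7}";
/// Q registers are expanded into their two D sub-registers, and a trailing
/// '^' (the LDM/STM system variant) is pushed as a separate token operand.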
3611 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3612   MCAsmParser &Parser = getParser();
3613   assert(Parser.getTok().is(AsmToken::LCurly) &&
3614          "Token is not a Left Curly Brace");
3615   SMLoc S = Parser.getTok().getLoc();
3616   Parser.Lex(); // Eat '{' token.
3617   SMLoc RegLoc = Parser.getTok().getLoc();
3618 
3619   // Check the first register in the list to see what register class
3620   // this is a list of.
3621   int Reg = tryParseRegister();
3622   if (Reg == -1)
3623     return Error(RegLoc, "register expected");
3624 
3625   // The reglist instructions have at most 16 registers, so reserve
3626   // space for that many.
3627   int EReg = 0;
3628   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3629 
3630   // Allow Q regs and just interpret them as the two D sub-registers.
3631   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3632     Reg = getDRegFromQReg(Reg);
3633     EReg = MRI->getEncodingValue(Reg);
3634     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3635     ++Reg;
3636   }
3637   const MCRegisterClass *RC;
3638   if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3639     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3640   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3641     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3642   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3643     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3644   else
3645     return Error(RegLoc, "invalid register in register list");
3646 
3647   // Store the register.
3648   EReg = MRI->getEncodingValue(Reg);
3649   Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3650 
3651   // This starts immediately after the first register token in the list,
3652   // so we can see either a comma or a minus (range separator) as a legal
3653   // next token.
3654   while (Parser.getTok().is(AsmToken::Comma) ||
3655          Parser.getTok().is(AsmToken::Minus)) {
3656     if (Parser.getTok().is(AsmToken::Minus)) {
3657       Parser.Lex(); // Eat the minus.
3658       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3659       int EndReg = tryParseRegister();
3660       if (EndReg == -1)
3661         return Error(AfterMinusLoc, "register expected");
3662       // Allow Q regs and just interpret them as the two D sub-registers.
3663       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3664         EndReg = getDRegFromQReg(EndReg) + 1;
3665       // If the register is the same as the start reg, there's nothing
3666       // more to do.
3667       if (Reg == EndReg)
3668         continue;
3669       // The register must be in the same register class as the first.
3670       if (!RC->contains(EndReg))
3671         return Error(AfterMinusLoc, "invalid register in register list");
3672       // Ranges must go from low to high.
3673       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3674         return Error(AfterMinusLoc, "bad range in register list");
3675 
3676       // Add all the registers in the range to the register list.
3677       while (Reg != EndReg) {
3678         Reg = getNextRegister(Reg);
3679         EReg = MRI->getEncodingValue(Reg);
3680         Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3681       }
3682       continue;
3683     }
3684     Parser.Lex(); // Eat the comma.
3685     RegLoc = Parser.getTok().getLoc();
3686     int OldReg = Reg;
3687     const AsmToken RegTok = Parser.getTok();
3688     Reg = tryParseRegister();
3689     if (Reg == -1)
3690       return Error(RegLoc, "register expected");
3691     // Allow Q regs and just interpret them as the two D sub-registers.
3692     bool isQReg = false;
3693     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3694       Reg = getDRegFromQReg(Reg);
3695       isQReg = true;
3696     }
3697     // The register must be in the same register class as the first.
3698     if (!RC->contains(Reg))
3699       return Error(RegLoc, "invalid register in register list");
3700     // List must be monotonically increasing.
3701     if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3702       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3703         Warning(RegLoc, "register list not in ascending order");
3704       else
3705         return Error(RegLoc, "register list not in ascending order");
3706     }
3707     if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3708       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3709               ") in register list");
3710       continue;
3711     }
3712     // VFP register lists must also be contiguous.
3713     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3714         Reg != OldReg + 1)
3715       return Error(RegLoc, "non-contiguous register range");
3716     EReg = MRI->getEncodingValue(Reg);
3717     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3718     if (isQReg) {
3719       EReg = MRI->getEncodingValue(++Reg);
3720       Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3721     }
3722   }
3723 
3724   if (Parser.getTok().isNot(AsmToken::RCurly))
3725     return Error(Parser.getTok().getLoc(), "'}' expected");
3726   SMLoc E = Parser.getTok().getEndLoc();
3727   Parser.Lex(); // Eat '}' token.
3728 
3729   // Push the register list operand.
3730   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3731 
3732   // The ARM system instruction variants for LDM/STM have a '^' token here.
3733   if (Parser.getTok().is(AsmToken::Caret)) {
3734     Operands.push_back(ARMOperand::CreateToken("^", Parser.getTok().getLoc()));
3735     Parser.Lex(); // Eat '^' token.
3736   }
3737 
3738   return false;
3739 }
3740 
3741 // Helper function to parse the lane index for vector lists.
3742 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3743 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3744   MCAsmParser &Parser = getParser();
3745   Index = 0; // Always return a defined index value.
3746   if (Parser.getTok().is(AsmToken::LBrac)) {
3747     Parser.Lex(); // Eat the '['.
3748     if (Parser.getTok().is(AsmToken::RBrac)) {
3749       // "Dn[]" is the 'all lanes' syntax.
3750       LaneKind = AllLanes;
3751       EndLoc = Parser.getTok().getEndLoc();
3752       Parser.Lex(); // Eat the ']'.
3753       return MatchOperand_Success;
3754     }
3755 
3756     // There's an optional '#' token here. Normally there wouldn't be, but
3757     // inline assembly puts one in, and it's friendly to accept that.
3758     if (Parser.getTok().is(AsmToken::Hash))
3759       Parser.Lex(); // Eat the '#'.
3760 
3761     const MCExpr *LaneIndex;
3762     SMLoc Loc = Parser.getTok().getLoc();
3763     if (getParser().parseExpression(LaneIndex)) {
3764       Error(Loc, "illegal expression");
3765       return MatchOperand_ParseFail;
3766     }
3767     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3768     if (!CE) {
3769       Error(Loc, "lane index must be empty or an integer");
3770       return MatchOperand_ParseFail;
3771     }
3772     if (Parser.getTok().isNot(AsmToken::RBrac)) {
3773       Error(Parser.getTok().getLoc(), "']' expected");
3774       return MatchOperand_ParseFail;
3775     }
3776     EndLoc = Parser.getTok().getEndLoc();
3777     Parser.Lex(); // Eat the ']'.
3778     int64_t Val = CE->getValue();
3779 
3780     // FIXME: Make this range check context sensitive for .8, .16, .32.
3781     if (Val < 0 || Val > 7) {
3782       Error(Parser.getTok().getLoc(), "lane index out of range");
3783       return MatchOperand_ParseFail;
3784     }
3785     Index = Val;
3786     LaneKind = IndexedLane;
3787     return MatchOperand_Success;
3788   }
3789   LaneKind = NoLanes;
3790   return MatchOperand_Success;
3791 }
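// Illustrative inputs for the helper above (the register itself has already
// been consumed by the caller): no bracket leaves LaneKind == NoLanes, "[]"
// yields AllLanes, and "[1]" yields IndexedLane with Index == 1.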
3792 
3793 // parse a vector register list
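// Accepted forms include a bare register ("d0" or "q1"), a braced list such
// as "{d0, d1, d2}" or "{d0-d3}", an all-lanes list ("{d0[], d1[]}"), and an
// indexed-lane list ("{d0[1], d1[1]}"); double-spaced lists are written with
// explicit D registers, e.g. "{d0, d2}".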
3794 ARMAsmParser::OperandMatchResultTy
3795 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3796   MCAsmParser &Parser = getParser();
3797   VectorLaneTy LaneKind;
3798   unsigned LaneIndex;
3799   SMLoc S = Parser.getTok().getLoc();
3800   // As an extension (to match gas), support a plain D register or Q register
3801   // (without enclosing curly braces) as a single- or double-entry list,
3802   // respectively.
3803   if (Parser.getTok().is(AsmToken::Identifier)) {
3804     SMLoc E = Parser.getTok().getEndLoc();
3805     int Reg = tryParseRegister();
3806     if (Reg == -1)
3807       return MatchOperand_NoMatch;
3808     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3809       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3810       if (Res != MatchOperand_Success)
3811         return Res;
3812       switch (LaneKind) {
3813       case NoLanes:
3814         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3815         break;
3816       case AllLanes:
3817         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3818                                                                 S, E));
3819         break;
3820       case IndexedLane:
3821         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3822                                                                LaneIndex,
3823                                                                false, S, E));
3824         break;
3825       }
3826       return MatchOperand_Success;
3827     }
3828     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3829       Reg = getDRegFromQReg(Reg);
3830       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3831       if (Res != MatchOperand_Success)
3832         return Res;
3833       switch (LaneKind) {
3834       case NoLanes:
3835         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3836                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3837         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3838         break;
3839       case AllLanes:
3840         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3841                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3842         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3843                                                                 S, E));
3844         break;
3845       case IndexedLane:
3846         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3847                                                                LaneIndex,
3848                                                                false, S, E));
3849         break;
3850       }
3851       return MatchOperand_Success;
3852     }
3853     Error(S, "vector register expected");
3854     return MatchOperand_ParseFail;
3855   }
3856 
3857   if (Parser.getTok().isNot(AsmToken::LCurly))
3858     return MatchOperand_NoMatch;
3859 
3860   Parser.Lex(); // Eat '{' token.
3861   SMLoc RegLoc = Parser.getTok().getLoc();
3862 
3863   int Reg = tryParseRegister();
3864   if (Reg == -1) {
3865     Error(RegLoc, "register expected");
3866     return MatchOperand_ParseFail;
3867   }
3868   unsigned Count = 1;
3869   int Spacing = 0;
3870   unsigned FirstReg = Reg;
3871   // The list is of D registers, but we also allow Q regs and just interpret
3872   // them as the two D sub-registers.
3873   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3874     FirstReg = Reg = getDRegFromQReg(Reg);
3875     Spacing = 1; // double-spacing requires explicit D registers, otherwise
3876                  // it's ambiguous with four-register single spaced.
3877     ++Reg;
3878     ++Count;
3879   }
3880 
3881   SMLoc E;
3882   if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3883     return MatchOperand_ParseFail;
3884 
3885   while (Parser.getTok().is(AsmToken::Comma) ||
3886          Parser.getTok().is(AsmToken::Minus)) {
3887     if (Parser.getTok().is(AsmToken::Minus)) {
3888       if (!Spacing)
3889         Spacing = 1; // Register range implies a single spaced list.
3890       else if (Spacing == 2) {
3891         Error(Parser.getTok().getLoc(),
3892               "sequential registers in double spaced list");
3893         return MatchOperand_ParseFail;
3894       }
3895       Parser.Lex(); // Eat the minus.
3896       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3897       int EndReg = tryParseRegister();
3898       if (EndReg == -1) {
3899         Error(AfterMinusLoc, "register expected");
3900         return MatchOperand_ParseFail;
3901       }
3902       // Allow Q regs and just interpret them as the two D sub-registers.
3903       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3904         EndReg = getDRegFromQReg(EndReg) + 1;
3905       // If the register is the same as the start reg, there's nothing
3906       // more to do.
3907       if (Reg == EndReg)
3908         continue;
3909       // The register must be in the same register class as the first.
3910       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3911         Error(AfterMinusLoc, "invalid register in register list");
3912         return MatchOperand_ParseFail;
3913       }
3914       // Ranges must go from low to high.
3915       if (Reg > EndReg) {
3916         Error(AfterMinusLoc, "bad range in register list");
3917         return MatchOperand_ParseFail;
3918       }
3919       // Parse the lane specifier if present.
3920       VectorLaneTy NextLaneKind;
3921       unsigned NextLaneIndex;
3922       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3923           MatchOperand_Success)
3924         return MatchOperand_ParseFail;
3925       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3926         Error(AfterMinusLoc, "mismatched lane index in register list");
3927         return MatchOperand_ParseFail;
3928       }
3929 
3930       // Add all the registers in the range to the register list.
3931       Count += EndReg - Reg;
3932       Reg = EndReg;
3933       continue;
3934     }
3935     Parser.Lex(); // Eat the comma.
3936     RegLoc = Parser.getTok().getLoc();
3937     int OldReg = Reg;
3938     Reg = tryParseRegister();
3939     if (Reg == -1) {
3940       Error(RegLoc, "register expected");
3941       return MatchOperand_ParseFail;
3942     }
3943     // vector register lists must be contiguous.
3944     // It's OK to use the enumeration values directly here, as the
3945     // VFP register classes have the enum sorted properly.
3946     //
3947     // The list is of D registers, but we also allow Q regs and just interpret
3948     // them as the two D sub-registers.
3949     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3950       if (!Spacing)
3951         Spacing = 1; // Register range implies a single spaced list.
3952       else if (Spacing == 2) {
3953         Error(RegLoc,
3954               "invalid register in double-spaced list (must be 'D' register)");
3955         return MatchOperand_ParseFail;
3956       }
3957       Reg = getDRegFromQReg(Reg);
3958       if (Reg != OldReg + 1) {
3959         Error(RegLoc, "non-contiguous register range");
3960         return MatchOperand_ParseFail;
3961       }
3962       ++Reg;
3963       Count += 2;
3964       // Parse the lane specifier if present.
3965       VectorLaneTy NextLaneKind;
3966       unsigned NextLaneIndex;
3967       SMLoc LaneLoc = Parser.getTok().getLoc();
3968       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3969           MatchOperand_Success)
3970         return MatchOperand_ParseFail;
3971       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3972         Error(LaneLoc, "mismatched lane index in register list");
3973         return MatchOperand_ParseFail;
3974       }
3975       continue;
3976     }
3977     // Normal D register.
3978     // Figure out the register spacing (single or double) of the list if
3979     // we don't know it already.
3980     if (!Spacing)
3981       Spacing = 1 + (Reg == OldReg + 2);
3982 
3983     // Just check that it's contiguous and keep going.
3984     if (Reg != OldReg + Spacing) {
3985       Error(RegLoc, "non-contiguous register range");
3986       return MatchOperand_ParseFail;
3987     }
3988     ++Count;
3989     // Parse the lane specifier if present.
3990     VectorLaneTy NextLaneKind;
3991     unsigned NextLaneIndex;
3992     SMLoc EndLoc = Parser.getTok().getLoc();
3993     if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3994       return MatchOperand_ParseFail;
3995     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3996       Error(EndLoc, "mismatched lane index in register list");
3997       return MatchOperand_ParseFail;
3998     }
3999   }
4000 
4001   if (Parser.getTok().isNot(AsmToken::RCurly)) {
4002     Error(Parser.getTok().getLoc(), "'}' expected");
4003     return MatchOperand_ParseFail;
4004   }
4005   E = Parser.getTok().getEndLoc();
4006   Parser.Lex(); // Eat '}' token.
4007 
4008   switch (LaneKind) {
4009   case NoLanes:
4010     // Two-register operands need to be converted to the
4011     // composite register classes.
4012     if (Count == 2) {
4013       const MCRegisterClass *RC = (Spacing == 1) ?
4014         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4015         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4016       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4017     }
4018 
4019     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
4020                                                     (Spacing == 2), S, E));
4021     break;
4022   case AllLanes:
4023     // Two-register operands need to be converted to the
4024     // composite register classes.
4025     if (Count == 2) {
4026       const MCRegisterClass *RC = (Spacing == 1) ?
4027         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4028         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4029       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4030     }
4031     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
4032                                                             (Spacing == 2),
4033                                                             S, E));
4034     break;
4035   case IndexedLane:
4036     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4037                                                            LaneIndex,
4038                                                            (Spacing == 2),
4039                                                            S, E));
4040     break;
4041   }
4042   return MatchOperand_Success;
4043 }
4044 
4045 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
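/// Accepts either a symbolic option such as "sy", "ish" or "oshst" (the
/// "...ld" forms require ARMv8), or a raw 4-bit immediate in [0, 15],
/// e.g. "dmb ish" or "dmb #0xb".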
4046 ARMAsmParser::OperandMatchResultTy
4047 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4048   MCAsmParser &Parser = getParser();
4049   SMLoc S = Parser.getTok().getLoc();
4050   const AsmToken &Tok = Parser.getTok();
4051   unsigned Opt;
4052 
4053   if (Tok.is(AsmToken::Identifier)) {
4054     StringRef OptStr = Tok.getString();
4055 
4056     Opt = StringSwitch<unsigned>(OptStr.lower())
4057       .Case("sy",    ARM_MB::SY)
4058       .Case("st",    ARM_MB::ST)
4059       .Case("ld",    ARM_MB::LD)
4060       .Case("sh",    ARM_MB::ISH)
4061       .Case("ish",   ARM_MB::ISH)
4062       .Case("shst",  ARM_MB::ISHST)
4063       .Case("ishst", ARM_MB::ISHST)
4064       .Case("ishld", ARM_MB::ISHLD)
4065       .Case("nsh",   ARM_MB::NSH)
4066       .Case("un",    ARM_MB::NSH)
4067       .Case("nshst", ARM_MB::NSHST)
4068       .Case("nshld", ARM_MB::NSHLD)
4069       .Case("unst",  ARM_MB::NSHST)
4070       .Case("osh",   ARM_MB::OSH)
4071       .Case("oshst", ARM_MB::OSHST)
4072       .Case("oshld", ARM_MB::OSHLD)
4073       .Default(~0U);
4074 
4075     // ishld, oshld, nshld and ld are only available from ARMv8.
4076     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4077                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4078       Opt = ~0U;
4079 
4080     if (Opt == ~0U)
4081       return MatchOperand_NoMatch;
4082 
4083     Parser.Lex(); // Eat identifier token.
4084   } else if (Tok.is(AsmToken::Hash) ||
4085              Tok.is(AsmToken::Dollar) ||
4086              Tok.is(AsmToken::Integer)) {
4087     if (Parser.getTok().isNot(AsmToken::Integer))
4088       Parser.Lex(); // Eat '#' or '$'.
4089     SMLoc Loc = Parser.getTok().getLoc();
4090 
4091     const MCExpr *MemBarrierID;
4092     if (getParser().parseExpression(MemBarrierID)) {
4093       Error(Loc, "illegal expression");
4094       return MatchOperand_ParseFail;
4095     }
4096 
4097     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4098     if (!CE) {
4099       Error(Loc, "constant expression expected");
4100       return MatchOperand_ParseFail;
4101     }
4102 
4103     int Val = CE->getValue();
4104     if (Val & ~0xf) {
4105       Error(Loc, "immediate value out of range");
4106       return MatchOperand_ParseFail;
4107     }
4108 
4109     Opt = ARM_MB::RESERVED_0 + Val;
4110   } else
4111     return MatchOperand_ParseFail;
4112 
4113   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4114   return MatchOperand_Success;
4115 }
4116 
4117 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
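/// Accepts the symbolic option "sy" or a raw 4-bit immediate in [0, 15],
/// e.g. "isb sy" or "isb #15".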
4118 ARMAsmParser::OperandMatchResultTy
4119 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4120   MCAsmParser &Parser = getParser();
4121   SMLoc S = Parser.getTok().getLoc();
4122   const AsmToken &Tok = Parser.getTok();
4123   unsigned Opt;
4124 
4125   if (Tok.is(AsmToken::Identifier)) {
4126     StringRef OptStr = Tok.getString();
4127 
4128     if (OptStr.equals_lower("sy"))
4129       Opt = ARM_ISB::SY;
4130     else
4131       return MatchOperand_NoMatch;
4132 
4133     Parser.Lex(); // Eat identifier token.
4134   } else if (Tok.is(AsmToken::Hash) ||
4135              Tok.is(AsmToken::Dollar) ||
4136              Tok.is(AsmToken::Integer)) {
4137     if (Parser.getTok().isNot(AsmToken::Integer))
4138       Parser.Lex(); // Eat '#' or '$'.
4139     SMLoc Loc = Parser.getTok().getLoc();
4140 
4141     const MCExpr *ISBarrierID;
4142     if (getParser().parseExpression(ISBarrierID)) {
4143       Error(Loc, "illegal expression");
4144       return MatchOperand_ParseFail;
4145     }
4146 
4147     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4148     if (!CE) {
4149       Error(Loc, "constant expression expected");
4150       return MatchOperand_ParseFail;
4151     }
4152 
4153     int Val = CE->getValue();
4154     if (Val & ~0xf) {
4155       Error(Loc, "immediate value out of range");
4156       return MatchOperand_ParseFail;
4157     }
4158 
4159     Opt = ARM_ISB::RESERVED_0 + Val;
4160   } else
4161     return MatchOperand_ParseFail;
4162 
4163   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4164           (ARM_ISB::InstSyncBOpt)Opt, S));
4165   return MatchOperand_Success;
4166 }
4167 
4168 
4169 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
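/// Accepts any combination of the letters 'a', 'i' and 'f' with no letter
/// repeated (e.g. the "if" in "cpsie if"), or the string "none" for an empty
/// flag set.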
4170 ARMAsmParser::OperandMatchResultTy
4171 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4172   MCAsmParser &Parser = getParser();
4173   SMLoc S = Parser.getTok().getLoc();
4174   const AsmToken &Tok = Parser.getTok();
4175   if (!Tok.is(AsmToken::Identifier))
4176     return MatchOperand_NoMatch;
4177   StringRef IFlagsStr = Tok.getString();
4178 
4179   // An iflags string of "none" is interpreted to mean that none of the AIF
4180   // bits are set.  Not a terribly useful instruction, but a valid encoding.
4181   unsigned IFlags = 0;
4182   if (IFlagsStr != "none") {
4183     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4184       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
4185         .Case("a", ARM_PROC::A)
4186         .Case("i", ARM_PROC::I)
4187         .Case("f", ARM_PROC::F)
4188         .Default(~0U);
4189 
4190       // If some specific iflag is already set, it means that some letter is
4191       // present more than once; this is not acceptable.
4192       if (Flag == ~0U || (IFlags & Flag))
4193         return MatchOperand_NoMatch;
4194 
4195       IFlags |= Flag;
4196     }
4197   }
4198 
4199   Parser.Lex(); // Eat identifier token.
4200   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4201   return MatchOperand_Success;
4202 }
4203 
4204 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
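/// e.g. "apsr_nzcvq" (an M-class SYSm name) or "cpsr_sxf" (an A/R-class
/// spec_reg plus flag letters).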
4205 ARMAsmParser::OperandMatchResultTy
4206 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4207   MCAsmParser &Parser = getParser();
4208   SMLoc S = Parser.getTok().getLoc();
4209   const AsmToken &Tok = Parser.getTok();
4210   if (!Tok.is(AsmToken::Identifier))
4211     return MatchOperand_NoMatch;
4212   StringRef Mask = Tok.getString();
4213 
4214   if (isMClass()) {
4215     // See ARMv6-M 10.1.1
4216     std::string Name = Mask.lower();
4217     unsigned FlagsVal = StringSwitch<unsigned>(Name)
4218       // Note: in the documentation:
4219       //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
4220       //  for MSR APSR_nzcvq.
4221       // but we do make it an alias here.  This is done to get the "mask encoding"
4222       // bits correct on MSR APSR writes.
4223       //
4224       // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
4225       // should really only be allowed when writing a special register.  Note
4226       // they get dropped in the MRS instruction reading a special register as
4227       // the SYSm field is only 8 bits.
4228       .Case("apsr", 0x800)
4229       .Case("apsr_nzcvq", 0x800)
4230       .Case("apsr_g", 0x400)
4231       .Case("apsr_nzcvqg", 0xc00)
4232       .Case("iapsr", 0x801)
4233       .Case("iapsr_nzcvq", 0x801)
4234       .Case("iapsr_g", 0x401)
4235       .Case("iapsr_nzcvqg", 0xc01)
4236       .Case("eapsr", 0x802)
4237       .Case("eapsr_nzcvq", 0x802)
4238       .Case("eapsr_g", 0x402)
4239       .Case("eapsr_nzcvqg", 0xc02)
4240       .Case("xpsr", 0x803)
4241       .Case("xpsr_nzcvq", 0x803)
4242       .Case("xpsr_g", 0x403)
4243       .Case("xpsr_nzcvqg", 0xc03)
4244       .Case("ipsr", 0x805)
4245       .Case("epsr", 0x806)
4246       .Case("iepsr", 0x807)
4247       .Case("msp", 0x808)
4248       .Case("psp", 0x809)
4249       .Case("primask", 0x810)
4250       .Case("basepri", 0x811)
4251       .Case("basepri_max", 0x812)
4252       .Case("faultmask", 0x813)
4253       .Case("control", 0x814)
4254       .Case("msplim", 0x80a)
4255       .Case("psplim", 0x80b)
4256       .Case("msp_ns", 0x888)
4257       .Case("psp_ns", 0x889)
4258       .Case("msplim_ns", 0x88a)
4259       .Case("psplim_ns", 0x88b)
4260       .Case("primask_ns", 0x890)
4261       .Case("basepri_ns", 0x891)
4262       .Case("basepri_max_ns", 0x892)
4263       .Case("faultmask_ns", 0x893)
4264       .Case("control_ns", 0x894)
4265       .Case("sp_ns", 0x898)
4266       .Default(~0U);
4267 
4268     if (FlagsVal == ~0U)
4269       return MatchOperand_NoMatch;
4270 
4271     if (!hasDSP() && (FlagsVal & 0x400))
4272       // The _g and _nzcvqg versions are only valid if the DSP extension is
4273       // available.
4274       return MatchOperand_NoMatch;
4275 
4276     if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
      // basepri, basepri_max and faultmask are only valid for v7-M.
4278       return MatchOperand_NoMatch;
4279 
4280     if (!has8MSecExt() && (FlagsVal == 0x80a || FlagsVal == 0x80b ||
4281                              (FlagsVal > 0x814 && FlagsVal < 0xc00)))
4282       return MatchOperand_NoMatch;
4283 
4284     if (!hasV8MMainline() && (FlagsVal == 0x88a || FlagsVal == 0x88b ||
4285                               (FlagsVal > 0x890 && FlagsVal <= 0x893)))
4286       return MatchOperand_NoMatch;
4287 
4288     Parser.Lex(); // Eat identifier token.
4289     Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4290     return MatchOperand_Success;
4291   }
4292 
4293   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4294   size_t Start = 0, Next = Mask.find('_');
4295   StringRef Flags = "";
4296   std::string SpecReg = Mask.slice(Start, Next).lower();
4297   if (Next != StringRef::npos)
4298     Flags = Mask.slice(Next+1, Mask.size());
4299 
4300   // FlagsVal contains the complete mask:
4301   // 3-0: Mask
4302   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4303   unsigned FlagsVal = 0;
4304 
4305   if (SpecReg == "apsr") {
4306     FlagsVal = StringSwitch<unsigned>(Flags)
4307     .Case("nzcvq",  0x8) // same as CPSR_f
4308     .Case("g",      0x4) // same as CPSR_s
4309     .Case("nzcvqg", 0xc) // same as CPSR_fs
4310     .Default(~0U);
4311 
4312     if (FlagsVal == ~0U) {
4313       if (!Flags.empty())
4314         return MatchOperand_NoMatch;
4315       else
4316         FlagsVal = 8; // No flag
4317     }
4318   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4319     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4320     if (Flags == "all" || Flags == "")
4321       Flags = "fc";
4322     for (int i = 0, e = Flags.size(); i != e; ++i) {
4323       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4324       .Case("c", 1)
4325       .Case("x", 2)
4326       .Case("s", 4)
4327       .Case("f", 8)
4328       .Default(~0U);
4329 
      // If some specific flag is already set, it means that some letter is
      // present more than once; this is not acceptable.
4332       if (FlagsVal == ~0U || (FlagsVal & Flag))
4333         return MatchOperand_NoMatch;
4334       FlagsVal |= Flag;
4335     }
4336   } else // No match for special register.
4337     return MatchOperand_NoMatch;
4338 
4339   // Special register without flags is NOT equivalent to "fc" flags.
4340   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
4341   // two lines would enable gas compatibility at the expense of breaking
4342   // round-tripping.
4343   //
4344   // if (!FlagsVal)
4345   //  FlagsVal = 0x9;
4346 
4347   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4348   if (SpecReg == "spsr")
4349     FlagsVal |= 16;
4350 
4351   Parser.Lex(); // Eat identifier token.
4352   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4353   return MatchOperand_Success;
4354 }
4355 
4356 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4357 /// use in the MRS/MSR instructions added to support virtualization.
4358 ARMAsmParser::OperandMatchResultTy
4359 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4360   MCAsmParser &Parser = getParser();
4361   SMLoc S = Parser.getTok().getLoc();
4362   const AsmToken &Tok = Parser.getTok();
4363   if (!Tok.is(AsmToken::Identifier))
4364     return MatchOperand_NoMatch;
4365   StringRef RegName = Tok.getString();
4366 
4367   // The values here come from B9.2.3 of the ARM ARM, where bits 4-0 are SysM
4368   // and bit 5 is R.
4369   unsigned Encoding = StringSwitch<unsigned>(RegName.lower())
4370                           .Case("r8_usr", 0x00)
4371                           .Case("r9_usr", 0x01)
4372                           .Case("r10_usr", 0x02)
4373                           .Case("r11_usr", 0x03)
4374                           .Case("r12_usr", 0x04)
4375                           .Case("sp_usr", 0x05)
4376                           .Case("lr_usr", 0x06)
4377                           .Case("r8_fiq", 0x08)
4378                           .Case("r9_fiq", 0x09)
4379                           .Case("r10_fiq", 0x0a)
4380                           .Case("r11_fiq", 0x0b)
4381                           .Case("r12_fiq", 0x0c)
4382                           .Case("sp_fiq", 0x0d)
4383                           .Case("lr_fiq", 0x0e)
4384                           .Case("lr_irq", 0x10)
4385                           .Case("sp_irq", 0x11)
4386                           .Case("lr_svc", 0x12)
4387                           .Case("sp_svc", 0x13)
4388                           .Case("lr_abt", 0x14)
4389                           .Case("sp_abt", 0x15)
4390                           .Case("lr_und", 0x16)
4391                           .Case("sp_und", 0x17)
4392                           .Case("lr_mon", 0x1c)
4393                           .Case("sp_mon", 0x1d)
4394                           .Case("elr_hyp", 0x1e)
4395                           .Case("sp_hyp", 0x1f)
4396                           .Case("spsr_fiq", 0x2e)
4397                           .Case("spsr_irq", 0x30)
4398                           .Case("spsr_svc", 0x32)
4399                           .Case("spsr_abt", 0x34)
4400                           .Case("spsr_und", 0x36)
4401                           .Case("spsr_mon", 0x3c)
4402                           .Case("spsr_hyp", 0x3e)
4403                           .Default(~0U);
4404 
4405   if (Encoding == ~0U)
4406     return MatchOperand_NoMatch;
4407 
4408   Parser.Lex(); // Eat identifier token.
4409   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4410   return MatchOperand_Success;
4411 }
4412 
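/// parsePKHImm - Parse the shift specifier of a PKH instruction, e.g. the
/// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" or the "asr #16" in
/// "pkhtb r0, r1, r2, asr #16". Op names the expected shift operator and
/// [Low, High] bounds the accepted shift amount.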
4413 ARMAsmParser::OperandMatchResultTy
4414 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4415                           int High) {
4416   MCAsmParser &Parser = getParser();
4417   const AsmToken &Tok = Parser.getTok();
4418   if (Tok.isNot(AsmToken::Identifier)) {
4419     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4420     return MatchOperand_ParseFail;
4421   }
4422   StringRef ShiftName = Tok.getString();
4423   std::string LowerOp = Op.lower();
4424   std::string UpperOp = Op.upper();
4425   if (ShiftName != LowerOp && ShiftName != UpperOp) {
4426     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4427     return MatchOperand_ParseFail;
4428   }
4429   Parser.Lex(); // Eat shift type token.
4430 
4431   // There must be a '#' and a shift amount.
4432   if (Parser.getTok().isNot(AsmToken::Hash) &&
4433       Parser.getTok().isNot(AsmToken::Dollar)) {
4434     Error(Parser.getTok().getLoc(), "'#' expected");
4435     return MatchOperand_ParseFail;
4436   }
4437   Parser.Lex(); // Eat hash token.
4438 
4439   const MCExpr *ShiftAmount;
4440   SMLoc Loc = Parser.getTok().getLoc();
4441   SMLoc EndLoc;
4442   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4443     Error(Loc, "illegal expression");
4444     return MatchOperand_ParseFail;
4445   }
4446   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4447   if (!CE) {
4448     Error(Loc, "constant expression expected");
4449     return MatchOperand_ParseFail;
4450   }
4451   int Val = CE->getValue();
4452   if (Val < Low || Val > High) {
4453     Error(Loc, "immediate value out of range");
4454     return MatchOperand_ParseFail;
4455   }
4456 
4457   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4458 
4459   return MatchOperand_Success;
4460 }
4461 
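/// parseSetEndImm - Parse the endianness specifier of a SETEND instruction,
/// e.g. "setend be" or "setend le".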
4462 ARMAsmParser::OperandMatchResultTy
4463 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4464   MCAsmParser &Parser = getParser();
4465   const AsmToken &Tok = Parser.getTok();
4466   SMLoc S = Tok.getLoc();
4467   if (Tok.isNot(AsmToken::Identifier)) {
4468     Error(S, "'be' or 'le' operand expected");
4469     return MatchOperand_ParseFail;
4470   }
4471   int Val = StringSwitch<int>(Tok.getString().lower())
4472     .Case("be", 1)
4473     .Case("le", 0)
4474     .Default(-1);
4475   Parser.Lex(); // Eat the token.
4476 
4477   if (Val == -1) {
4478     Error(S, "'be' or 'le' operand expected");
4479     return MatchOperand_ParseFail;
4480   }
4481   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4482                                                                   getContext()),
4483                                            S, Tok.getEndLoc()));
4484   return MatchOperand_Success;
4485 }
4486 
4487 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4488 /// instructions. Legal values are:
4489 ///     lsl #n  'n' in [0,31]
4490 ///     asr #n  'n' in [1,32]
4491 ///             n == 32 encoded as n == 0.
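/// e.g. the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the "asr #2" in
/// "usat r0, #7, r1, asr #2".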
4492 ARMAsmParser::OperandMatchResultTy
4493 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4494   MCAsmParser &Parser = getParser();
4495   const AsmToken &Tok = Parser.getTok();
4496   SMLoc S = Tok.getLoc();
4497   if (Tok.isNot(AsmToken::Identifier)) {
4498     Error(S, "shift operator 'asr' or 'lsl' expected");
4499     return MatchOperand_ParseFail;
4500   }
4501   StringRef ShiftName = Tok.getString();
4502   bool isASR;
4503   if (ShiftName == "lsl" || ShiftName == "LSL")
4504     isASR = false;
4505   else if (ShiftName == "asr" || ShiftName == "ASR")
4506     isASR = true;
4507   else {
4508     Error(S, "shift operator 'asr' or 'lsl' expected");
4509     return MatchOperand_ParseFail;
4510   }
4511   Parser.Lex(); // Eat the operator.
4512 
4513   // A '#' and a shift amount.
4514   if (Parser.getTok().isNot(AsmToken::Hash) &&
4515       Parser.getTok().isNot(AsmToken::Dollar)) {
4516     Error(Parser.getTok().getLoc(), "'#' expected");
4517     return MatchOperand_ParseFail;
4518   }
4519   Parser.Lex(); // Eat hash token.
4520   SMLoc ExLoc = Parser.getTok().getLoc();
4521 
4522   const MCExpr *ShiftAmount;
4523   SMLoc EndLoc;
4524   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4525     Error(ExLoc, "malformed shift expression");
4526     return MatchOperand_ParseFail;
4527   }
4528   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4529   if (!CE) {
4530     Error(ExLoc, "shift amount must be an immediate");
4531     return MatchOperand_ParseFail;
4532   }
4533 
4534   int64_t Val = CE->getValue();
4535   if (isASR) {
4536     // Shift amount must be in [1,32]
4537     if (Val < 1 || Val > 32) {
4538       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4539       return MatchOperand_ParseFail;
4540     }
    // asr #32 is encoded as asr #0, but is not allowed in Thumb2 mode.
4542     if (isThumb() && Val == 32) {
4543       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4544       return MatchOperand_ParseFail;
4545     }
4546     if (Val == 32) Val = 0;
4547   } else {
    // Shift amount must be in [0,31]
    if (Val < 0 || Val > 31) {
      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4551       return MatchOperand_ParseFail;
4552     }
4553   }
4554 
4555   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4556 
4557   return MatchOperand_Success;
4558 }
4559 
4560 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4561 /// of instructions. Legal values are:
4562 ///     ror #n  'n' in {0, 8, 16, 24}
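/// e.g. the "ror #8" in "sxtb r0, r1, ror #8".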
4563 ARMAsmParser::OperandMatchResultTy
4564 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4565   MCAsmParser &Parser = getParser();
4566   const AsmToken &Tok = Parser.getTok();
4567   SMLoc S = Tok.getLoc();
4568   if (Tok.isNot(AsmToken::Identifier))
4569     return MatchOperand_NoMatch;
4570   StringRef ShiftName = Tok.getString();
4571   if (ShiftName != "ror" && ShiftName != "ROR")
4572     return MatchOperand_NoMatch;
4573   Parser.Lex(); // Eat the operator.
4574 
4575   // A '#' and a rotate amount.
4576   if (Parser.getTok().isNot(AsmToken::Hash) &&
4577       Parser.getTok().isNot(AsmToken::Dollar)) {
4578     Error(Parser.getTok().getLoc(), "'#' expected");
4579     return MatchOperand_ParseFail;
4580   }
4581   Parser.Lex(); // Eat hash token.
4582   SMLoc ExLoc = Parser.getTok().getLoc();
4583 
4584   const MCExpr *ShiftAmount;
4585   SMLoc EndLoc;
4586   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4587     Error(ExLoc, "malformed rotate expression");
4588     return MatchOperand_ParseFail;
4589   }
4590   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4591   if (!CE) {
4592     Error(ExLoc, "rotate amount must be an immediate");
4593     return MatchOperand_ParseFail;
4594   }
4595 
4596   int64_t Val = CE->getValue();
  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
  // normally, zero is represented in asm by omitting the rotate operand
  // entirely.
4600   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4601     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4602     return MatchOperand_ParseFail;
4603   }
4604 
4605   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4606 
4607   return MatchOperand_Success;
4608 }
4609 
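/// parseModImm - Parse an ARM modified immediate: an 8-bit value rotated
/// right by an even amount, written either as a single constant,
/// e.g. "mov r0, #0xff000000", or as an explicit (#bits, #rot) pair,
/// e.g. "mov r0, #255, #8".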
4610 ARMAsmParser::OperandMatchResultTy
4611 ARMAsmParser::parseModImm(OperandVector &Operands) {
4612   MCAsmParser &Parser = getParser();
4613   MCAsmLexer &Lexer = getLexer();
4614   int64_t Imm1, Imm2;
4615 
4616   SMLoc S = Parser.getTok().getLoc();
4617 
4618   // 1) A mod_imm operand can appear in the place of a register name:
4619   //   add r0, #mod_imm
4620   //   add r0, r0, #mod_imm
4621   // to correctly handle the latter, we bail out as soon as we see an
4622   // identifier.
4623   //
4624   // 2) Similarly, we do not want to parse into complex operands:
4625   //   mov r0, #mod_imm
4626   //   mov r0, :lower16:(_foo)
4627   if (Parser.getTok().is(AsmToken::Identifier) ||
4628       Parser.getTok().is(AsmToken::Colon))
4629     return MatchOperand_NoMatch;
4630 
4631   // Hash (dollar) is optional as per the ARMARM
4632   if (Parser.getTok().is(AsmToken::Hash) ||
4633       Parser.getTok().is(AsmToken::Dollar)) {
4634     // Avoid parsing into complex operands (#:)
4635     if (Lexer.peekTok().is(AsmToken::Colon))
4636       return MatchOperand_NoMatch;
4637 
4638     // Eat the hash (dollar)
4639     Parser.Lex();
4640   }
4641 
4642   SMLoc Sx1, Ex1;
4643   Sx1 = Parser.getTok().getLoc();
4644   const MCExpr *Imm1Exp;
4645   if (getParser().parseExpression(Imm1Exp, Ex1)) {
4646     Error(Sx1, "malformed expression");
4647     return MatchOperand_ParseFail;
4648   }
4649 
4650   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4651 
4652   if (CE) {
    // Immediate must fit within 32 bits.
4654     Imm1 = CE->getValue();
4655     int Enc = ARM_AM::getSOImmVal(Imm1);
4656     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4657       // We have a match!
4658       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4659                                                   (Enc & 0xF00) >> 7,
4660                                                   Sx1, Ex1));
4661       return MatchOperand_Success;
4662     }
4663 
    // We have parsed an immediate which is not for us; fall back to a plain
    // immediate. This can happen for instruction aliases. For example,
    // ARMInstrInfo.td defines the alias [mov <-> mvn], which can transform
    // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
    // instruction with a mod_imm operand. The alias is defined such that the
    // parser method is shared; that's why we have to do this here.
4670     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4671       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4672       return MatchOperand_Success;
4673     }
4674   } else {
    // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
    // MCFixup). Fall back to a plain immediate.
4677     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4678     return MatchOperand_Success;
4679   }
4680 
4681   // From this point onward, we expect the input to be a (#bits, #rot) pair
4682   if (Parser.getTok().isNot(AsmToken::Comma)) {
4683     Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4684     return MatchOperand_ParseFail;
4685   }
4686 
4687   if (Imm1 & ~0xFF) {
    Error(Sx1, "immediate operand must be a number in the range [0, 255]");
4689     return MatchOperand_ParseFail;
4690   }
4691 
4692   // Eat the comma
4693   Parser.Lex();
4694 
4695   // Repeat for #rot
4696   SMLoc Sx2, Ex2;
4697   Sx2 = Parser.getTok().getLoc();
4698 
4699   // Eat the optional hash (dollar)
4700   if (Parser.getTok().is(AsmToken::Hash) ||
4701       Parser.getTok().is(AsmToken::Dollar))
4702     Parser.Lex();
4703 
4704   const MCExpr *Imm2Exp;
4705   if (getParser().parseExpression(Imm2Exp, Ex2)) {
4706     Error(Sx2, "malformed expression");
4707     return MatchOperand_ParseFail;
4708   }
4709 
4710   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4711 
4712   if (CE) {
4713     Imm2 = CE->getValue();
4714     if (!(Imm2 & ~0x1E)) {
4715       // We have a match!
4716       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4717       return MatchOperand_Success;
4718     }
    Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
4720     return MatchOperand_ParseFail;
4721   } else {
4722     Error(Sx2, "constant expression expected");
4723     return MatchOperand_ParseFail;
4724   }
4725 }
4726 
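/// parseBitfield - Parse the "#<lsb>, #<width>" operands of a bitfield
/// instruction, e.g. "bfi r0, r1, #8, #4" or "sbfx r0, r1, #0, #16".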
4727 ARMAsmParser::OperandMatchResultTy
4728 ARMAsmParser::parseBitfield(OperandVector &Operands) {
4729   MCAsmParser &Parser = getParser();
4730   SMLoc S = Parser.getTok().getLoc();
4731   // The bitfield descriptor is really two operands, the LSB and the width.
4732   if (Parser.getTok().isNot(AsmToken::Hash) &&
4733       Parser.getTok().isNot(AsmToken::Dollar)) {
4734     Error(Parser.getTok().getLoc(), "'#' expected");
4735     return MatchOperand_ParseFail;
4736   }
4737   Parser.Lex(); // Eat hash token.
4738 
4739   const MCExpr *LSBExpr;
4740   SMLoc E = Parser.getTok().getLoc();
4741   if (getParser().parseExpression(LSBExpr)) {
4742     Error(E, "malformed immediate expression");
4743     return MatchOperand_ParseFail;
4744   }
4745   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4746   if (!CE) {
4747     Error(E, "'lsb' operand must be an immediate");
4748     return MatchOperand_ParseFail;
4749   }
4750 
4751   int64_t LSB = CE->getValue();
4752   // The LSB must be in the range [0,31]
4753   if (LSB < 0 || LSB > 31) {
4754     Error(E, "'lsb' operand must be in the range [0,31]");
4755     return MatchOperand_ParseFail;
4756   }
4757   E = Parser.getTok().getLoc();
4758 
4759   // Expect another immediate operand.
4760   if (Parser.getTok().isNot(AsmToken::Comma)) {
4761     Error(Parser.getTok().getLoc(), "too few operands");
4762     return MatchOperand_ParseFail;
4763   }
  Parser.Lex(); // Eat comma token.
4765   if (Parser.getTok().isNot(AsmToken::Hash) &&
4766       Parser.getTok().isNot(AsmToken::Dollar)) {
4767     Error(Parser.getTok().getLoc(), "'#' expected");
4768     return MatchOperand_ParseFail;
4769   }
4770   Parser.Lex(); // Eat hash token.
4771 
4772   const MCExpr *WidthExpr;
4773   SMLoc EndLoc;
4774   if (getParser().parseExpression(WidthExpr, EndLoc)) {
4775     Error(E, "malformed immediate expression");
4776     return MatchOperand_ParseFail;
4777   }
4778   CE = dyn_cast<MCConstantExpr>(WidthExpr);
4779   if (!CE) {
4780     Error(E, "'width' operand must be an immediate");
4781     return MatchOperand_ParseFail;
4782   }
4783 
4784   int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
4786   if (Width < 1 || Width > 32 - LSB) {
4787     Error(E, "'width' operand must be in the range [1,32-lsb]");
4788     return MatchOperand_ParseFail;
4789   }
4790 
4791   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4792 
4793   return MatchOperand_Success;
4794 }
4795 
4796 ARMAsmParser::OperandMatchResultTy
4797 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4798   // Check for a post-index addressing register operand. Specifically:
4799   // postidx_reg := '+' register {, shift}
4800   //              | '-' register {, shift}
4801   //              | register {, shift}
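  // e.g. the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2".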
4802 
  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives are handled by
  // other parse methods.
4806   MCAsmParser &Parser = getParser();
4807   AsmToken Tok = Parser.getTok();
4808   SMLoc S = Tok.getLoc();
4809   bool haveEaten = false;
4810   bool isAdd = true;
4811   if (Tok.is(AsmToken::Plus)) {
4812     Parser.Lex(); // Eat the '+' token.
4813     haveEaten = true;
4814   } else if (Tok.is(AsmToken::Minus)) {
4815     Parser.Lex(); // Eat the '-' token.
4816     isAdd = false;
4817     haveEaten = true;
4818   }
4819 
4820   SMLoc E = Parser.getTok().getEndLoc();
4821   int Reg = tryParseRegister();
4822   if (Reg == -1) {
4823     if (!haveEaten)
4824       return MatchOperand_NoMatch;
4825     Error(Parser.getTok().getLoc(), "register expected");
4826     return MatchOperand_ParseFail;
4827   }
4828 
4829   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4830   unsigned ShiftImm = 0;
4831   if (Parser.getTok().is(AsmToken::Comma)) {
4832     Parser.Lex(); // Eat the ','.
4833     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4834       return MatchOperand_ParseFail;
4835 
4836     // FIXME: Only approximates end...may include intervening whitespace.
4837     E = Parser.getTok().getLoc();
4838   }
4839 
4840   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4841                                                   ShiftImm, S, E));
4842 
4843   return MatchOperand_Success;
4844 }
4845 
4846 ARMAsmParser::OperandMatchResultTy
4847 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4848   // Check for a post-index addressing register operand. Specifically:
4849   // am3offset := '+' register
4850   //              | '-' register
4851   //              | register
4852   //              | # imm
4853   //              | # + imm
4854   //              | # - imm
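  // e.g. the "#8" in "ldrd r0, r1, [r2], #8" or the "-r3" in
  // "strh r0, [r1], -r3".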
4855 
  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives are handled by
  // other parse methods.
4859   MCAsmParser &Parser = getParser();
4860   AsmToken Tok = Parser.getTok();
4861   SMLoc S = Tok.getLoc();
4862 
4863   // Do immediates first, as we always parse those if we have a '#'.
4864   if (Parser.getTok().is(AsmToken::Hash) ||
4865       Parser.getTok().is(AsmToken::Dollar)) {
4866     Parser.Lex(); // Eat '#' or '$'.
4867     // Explicitly look for a '-', as we need to encode negative zero
4868     // differently.
4869     bool isNegative = Parser.getTok().is(AsmToken::Minus);
4870     const MCExpr *Offset;
4871     SMLoc E;
4872     if (getParser().parseExpression(Offset, E))
4873       return MatchOperand_ParseFail;
4874     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4875     if (!CE) {
4876       Error(S, "constant expression expected");
4877       return MatchOperand_ParseFail;
4878     }
4879     // Negative zero is encoded as the flag value INT32_MIN.
4880     int32_t Val = CE->getValue();
4881     if (isNegative && Val == 0)
4882       Val = INT32_MIN;
4883 
4884     Operands.push_back(
4885       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
4886 
4887     return MatchOperand_Success;
4888   }
4889 
4891   bool haveEaten = false;
4892   bool isAdd = true;
4893   if (Tok.is(AsmToken::Plus)) {
4894     Parser.Lex(); // Eat the '+' token.
4895     haveEaten = true;
4896   } else if (Tok.is(AsmToken::Minus)) {
4897     Parser.Lex(); // Eat the '-' token.
4898     isAdd = false;
4899     haveEaten = true;
4900   }
4901 
4902   Tok = Parser.getTok();
4903   int Reg = tryParseRegister();
4904   if (Reg == -1) {
4905     if (!haveEaten)
4906       return MatchOperand_NoMatch;
4907     Error(Tok.getLoc(), "register expected");
4908     return MatchOperand_ParseFail;
4909   }
4910 
4911   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4912                                                   0, S, Tok.getEndLoc()));
4913 
4914   return MatchOperand_Success;
4915 }
4916 
4917 /// Convert parsed operands to MCInst.  Needed here because this instruction
4918 /// only has two register operands, but multiplication is commutative so
4919 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4920 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4921                                     const OperandVector &Operands) {
4922   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4923   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4924   // If we have a three-operand form, make sure to set Rn to be the operand
4925   // that isn't the same as Rd.
4926   unsigned RegOp = 4;
4927   if (Operands.size() == 6 &&
4928       ((ARMOperand &)*Operands[4]).getReg() ==
4929           ((ARMOperand &)*Operands[3]).getReg())
4930     RegOp = 5;
4931   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4932   Inst.addOperand(Inst.getOperand(0));
4933   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4934 }
4935 
4936 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
4937                                     const OperandVector &Operands) {
4938   int CondOp = -1, ImmOp = -1;
4939   switch(Inst.getOpcode()) {
4940     case ARM::tB:
4941     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
4942 
4943     case ARM::t2B:
4944     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
4945 
4946     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
4947   }
  // First decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block.
  if (inITBlock()) {
    // Inside an IT block we cannot have any conditional branches; any
    // such instruction needs to be converted to unconditional form.
4953     switch(Inst.getOpcode()) {
4954       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
4955       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
4956     }
4957   } else {
4958     // outside IT blocks we can only have unconditional branches with AL
4959     // condition code or conditional branches with non-AL condition code
4960     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
4961     switch(Inst.getOpcode()) {
4962       case ARM::tB:
4963       case ARM::tBcc:
4964         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
4965         break;
4966       case ARM::t2B:
4967       case ARM::t2Bcc:
4968         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
4969         break;
4970     }
4971   }
4972 
4973   // now decide on encoding size based on branch target range
4974   switch(Inst.getOpcode()) {
4975     // classify tB as either t2B or t1B based on range of immediate operand
4976     case ARM::tB: {
4977       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4978       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
4979         Inst.setOpcode(ARM::t2B);
4980       break;
4981     }
4982     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
4983     case ARM::tBcc: {
4984       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
4985       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
4986         Inst.setOpcode(ARM::t2Bcc);
4987       break;
4988     }
4989   }
4990   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
4991   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
4992 }
4993 
/// Parse an ARM memory expression. Return false if successful, otherwise
/// return true (after emitting an error). The first token must be a '[' when
/// called.
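/// e.g. "[r0]", "[r0, #4]", "[r0, -r1, lsl #2]" or "[r0:128]".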
4996 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
4997   MCAsmParser &Parser = getParser();
4998   SMLoc S, E;
4999   assert(Parser.getTok().is(AsmToken::LBrac) &&
5000          "Token is not a Left Bracket");
5001   S = Parser.getTok().getLoc();
5002   Parser.Lex(); // Eat left bracket token.
5003 
5004   const AsmToken &BaseRegTok = Parser.getTok();
5005   int BaseRegNum = tryParseRegister();
5006   if (BaseRegNum == -1)
5007     return Error(BaseRegTok.getLoc(), "register expected");
5008 
5009   // The next token must either be a comma, a colon or a closing bracket.
5010   const AsmToken &Tok = Parser.getTok();
5011   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5012       !Tok.is(AsmToken::RBrac))
5013     return Error(Tok.getLoc(), "malformed memory operand");
5014 
5015   if (Tok.is(AsmToken::RBrac)) {
5016     E = Tok.getEndLoc();
5017     Parser.Lex(); // Eat right bracket token.
5018 
5019     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5020                                              ARM_AM::no_shift, 0, 0, false,
5021                                              S, E));
5022 
5023     // If there's a pre-indexing writeback marker, '!', just add it as a token
5024     // operand. It's rather odd, but syntactically valid.
5025     if (Parser.getTok().is(AsmToken::Exclaim)) {
5026       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5027       Parser.Lex(); // Eat the '!'.
5028     }
5029 
5030     return false;
5031   }
5032 
5033   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5034          "Lost colon or comma in memory operand?!");
5035   if (Tok.is(AsmToken::Comma)) {
5036     Parser.Lex(); // Eat the comma.
5037   }
5038 
5039   // If we have a ':', it's an alignment specifier.
5040   if (Parser.getTok().is(AsmToken::Colon)) {
5041     Parser.Lex(); // Eat the ':'.
5042     E = Parser.getTok().getLoc();
5043     SMLoc AlignmentLoc = Tok.getLoc();
5044 
5045     const MCExpr *Expr;
5046     if (getParser().parseExpression(Expr))
5047      return true;
5048 
5049     // The expression has to be a constant. Memory references with relocations
5050     // don't come through here, as they use the <label> forms of the relevant
5051     // instructions.
5052     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5053     if (!CE)
5054       return Error (E, "constant expression expected");
5055 
5056     unsigned Align = 0;
5057     switch (CE->getValue()) {
5058     default:
5059       return Error(E,
5060                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5061     case 16:  Align = 2; break;
5062     case 32:  Align = 4; break;
5063     case 64:  Align = 8; break;
5064     case 128: Align = 16; break;
5065     case 256: Align = 32; break;
5066     }
5067 
5068     // Now we should have the closing ']'
5069     if (Parser.getTok().isNot(AsmToken::RBrac))
5070       return Error(Parser.getTok().getLoc(), "']' expected");
5071     E = Parser.getTok().getEndLoc();
5072     Parser.Lex(); // Eat right bracket token.
5073 
5074     // Don't worry about range checking the value here. That's handled by
5075     // the is*() predicates.
5076     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5077                                              ARM_AM::no_shift, 0, Align,
5078                                              false, S, E, AlignmentLoc));
5079 
5080     // If there's a pre-indexing writeback marker, '!', just add it as a token
5081     // operand.
5082     if (Parser.getTok().is(AsmToken::Exclaim)) {
5083       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5084       Parser.Lex(); // Eat the '!'.
5085     }
5086 
5087     return false;
5088   }
5089 
5090   // If we have a '#', it's an immediate offset, else assume it's a register
5091   // offset. Be friendly and also accept a plain integer (without a leading
5092   // hash) for gas compatibility.
5093   if (Parser.getTok().is(AsmToken::Hash) ||
5094       Parser.getTok().is(AsmToken::Dollar) ||
5095       Parser.getTok().is(AsmToken::Integer)) {
5096     if (Parser.getTok().isNot(AsmToken::Integer))
5097       Parser.Lex(); // Eat '#' or '$'.
5098     E = Parser.getTok().getLoc();
5099 
5100     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5101     const MCExpr *Offset;
5102     if (getParser().parseExpression(Offset))
5103      return true;
5104 
5105     // The expression has to be a constant. Memory references with relocations
5106     // don't come through here, as they use the <label> forms of the relevant
5107     // instructions.
5108     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5109     if (!CE)
5110       return Error (E, "constant expression expected");
5111 
5112     // If the constant was #-0, represent it as INT32_MIN.
5113     int32_t Val = CE->getValue();
5114     if (isNegative && Val == 0)
5115       CE = MCConstantExpr::create(INT32_MIN, getContext());
5116 
5117     // Now we should have the closing ']'
5118     if (Parser.getTok().isNot(AsmToken::RBrac))
5119       return Error(Parser.getTok().getLoc(), "']' expected");
5120     E = Parser.getTok().getEndLoc();
5121     Parser.Lex(); // Eat right bracket token.
5122 
5123     // Don't worry about range checking the value here. That's handled by
5124     // the is*() predicates.
5125     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
5126                                              ARM_AM::no_shift, 0, 0,
5127                                              false, S, E));
5128 
5129     // If there's a pre-indexing writeback marker, '!', just add it as a token
5130     // operand.
5131     if (Parser.getTok().is(AsmToken::Exclaim)) {
5132       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5133       Parser.Lex(); // Eat the '!'.
5134     }
5135 
5136     return false;
5137   }
5138 
5139   // The register offset is optionally preceded by a '+' or '-'
5140   bool isNegative = false;
5141   if (Parser.getTok().is(AsmToken::Minus)) {
5142     isNegative = true;
5143     Parser.Lex(); // Eat the '-'.
5144   } else if (Parser.getTok().is(AsmToken::Plus)) {
5145     // Nothing to do.
5146     Parser.Lex(); // Eat the '+'.
5147   }
5148 
5149   E = Parser.getTok().getLoc();
5150   int OffsetRegNum = tryParseRegister();
5151   if (OffsetRegNum == -1)
5152     return Error(E, "register expected");
5153 
5154   // If there's a shift operator, handle it.
5155   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5156   unsigned ShiftImm = 0;
5157   if (Parser.getTok().is(AsmToken::Comma)) {
5158     Parser.Lex(); // Eat the ','.
5159     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5160       return true;
5161   }
5162 
5163   // Now we should have the closing ']'
5164   if (Parser.getTok().isNot(AsmToken::RBrac))
5165     return Error(Parser.getTok().getLoc(), "']' expected");
5166   E = Parser.getTok().getEndLoc();
5167   Parser.Lex(); // Eat right bracket token.
5168 
5169   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5170                                            ShiftType, ShiftImm, 0, isNegative,
5171                                            S, E));
5172 
5173   // If there's a pre-indexing writeback marker, '!', just add it as a token
5174   // operand.
5175   if (Parser.getTok().is(AsmToken::Exclaim)) {
5176     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5177     Parser.Lex(); // Eat the '!'.
5178   }
5179 
5180   return false;
5181 }
5182 
5183 /// parseMemRegOffsetShift - one of these two:
5184 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5185 ///   rrx
/// Returns false if the shift was parsed successfully, true otherwise.
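/// e.g. the "lsl #2" in "[r0, r1, lsl #2]", or "rrx" on its own.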
5187 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5188                                           unsigned &Amount) {
5189   MCAsmParser &Parser = getParser();
5190   SMLoc Loc = Parser.getTok().getLoc();
5191   const AsmToken &Tok = Parser.getTok();
5192   if (Tok.isNot(AsmToken::Identifier))
5193     return true;
5194   StringRef ShiftName = Tok.getString();
5195   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5196       ShiftName == "asl" || ShiftName == "ASL")
5197     St = ARM_AM::lsl;
5198   else if (ShiftName == "lsr" || ShiftName == "LSR")
5199     St = ARM_AM::lsr;
5200   else if (ShiftName == "asr" || ShiftName == "ASR")
5201     St = ARM_AM::asr;
5202   else if (ShiftName == "ror" || ShiftName == "ROR")
5203     St = ARM_AM::ror;
5204   else if (ShiftName == "rrx" || ShiftName == "RRX")
5205     St = ARM_AM::rrx;
5206   else
5207     return Error(Loc, "illegal shift operator");
5208   Parser.Lex(); // Eat shift type token.
5209 
5210   // rrx stands alone.
5211   Amount = 0;
5212   if (St != ARM_AM::rrx) {
5213     Loc = Parser.getTok().getLoc();
5214     // A '#' and a shift amount.
5215     const AsmToken &HashTok = Parser.getTok();
5216     if (HashTok.isNot(AsmToken::Hash) &&
5217         HashTok.isNot(AsmToken::Dollar))
5218       return Error(HashTok.getLoc(), "'#' expected");
5219     Parser.Lex(); // Eat hash token.
5220 
5221     const MCExpr *Expr;
5222     if (getParser().parseExpression(Expr))
5223       return true;
5224     // Range check the immediate.
5225     // lsl, ror: 0 <= imm <= 31
5226     // lsr, asr: 0 <= imm <= 32
5227     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5228     if (!CE)
5229       return Error(Loc, "shift amount must be an immediate");
5230     int64_t Imm = CE->getValue();
5231     if (Imm < 0 ||
5232         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5233         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5234       return Error(Loc, "immediate shift value out of range");
5235     // If <ShiftTy> #0, turn it into a no_shift.
5236     if (Imm == 0)
5237       St = ARM_AM::lsl;
5238     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5239     if (Imm == 32)
5240       Imm = 0;
5241     Amount = Imm;
5242   }
5243 
5244   return false;
5245 }
5246 
5247 /// parseFPImm - A floating point immediate expression operand.
5248 ARMAsmParser::OperandMatchResultTy
5249 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5250   MCAsmParser &Parser = getParser();
5251   // Anything that can accept a floating point constant as an operand
5252   // needs to go through here, as the regular parseExpression is
5253   // integer only.
5254   //
5255   // This routine still creates a generic Immediate operand, containing
5256   // a bitcast of the 64-bit floating point value. The various operands
5257   // that accept floats can check whether the value is valid for them
5258   // via the standard is*() predicates.
5259 
5260   SMLoc S = Parser.getTok().getLoc();
5261 
5262   if (Parser.getTok().isNot(AsmToken::Hash) &&
5263       Parser.getTok().isNot(AsmToken::Dollar))
5264     return MatchOperand_NoMatch;
5265 
5266   // Disambiguate the VMOV forms that can accept an FP immediate.
5267   // vmov.f32 <sreg>, #imm
5268   // vmov.f64 <dreg>, #imm
5269   // vmov.f32 <dreg>, #imm  @ vector f32x2
5270   // vmov.f32 <qreg>, #imm  @ vector f32x4
5271   //
5272   // There are also the NEON VMOV instructions which expect an
5273   // integer constant. Make sure we don't try to parse an FPImm
5274   // for these:
5275   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5276   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5277   bool isVmovf = TyOp.isToken() &&
5278                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5279                   TyOp.getToken() == ".f16");
5280   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5281   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5282                                          Mnemonic.getToken() == "fconsts");
5283   if (!(isVmovf || isFconst))
5284     return MatchOperand_NoMatch;
5285 
5286   Parser.Lex(); // Eat '#' or '$'.
5287 
5288   // Handle negation, as that still comes through as a separate token.
5289   bool isNegative = false;
5290   if (Parser.getTok().is(AsmToken::Minus)) {
5291     isNegative = true;
5292     Parser.Lex();
5293   }
5294   const AsmToken &Tok = Parser.getTok();
5295   SMLoc Loc = Tok.getLoc();
5296   if (Tok.is(AsmToken::Real) && isVmovf) {
5297     APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
5298     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5299     // If we had a '-' in front, toggle the sign bit.
5300     IntVal ^= (uint64_t)isNegative << 31;
5301     Parser.Lex(); // Eat the token.
5302     Operands.push_back(ARMOperand::CreateImm(
5303           MCConstantExpr::create(IntVal, getContext()),
5304           S, Parser.getTok().getLoc()));
5305     return MatchOperand_Success;
5306   }
5307   // Also handle plain integers. Instructions which allow floating point
5308   // immediates also allow a raw encoded 8-bit value.
5309   if (Tok.is(AsmToken::Integer) && isFconst) {
5310     int64_t Val = Tok.getIntVal();
5311     Parser.Lex(); // Eat the token.
5312     if (Val > 255 || Val < 0) {
5313       Error(Loc, "encoded floating point value out of range");
5314       return MatchOperand_ParseFail;
5315     }
5316     float RealVal = ARM_AM::getFPImmFloat(Val);
5317     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5318 
5319     Operands.push_back(ARMOperand::CreateImm(
5320         MCConstantExpr::create(Val, getContext()), S,
5321         Parser.getTok().getLoc()));
5322     return MatchOperand_Success;
5323   }
5324 
5325   Error(Loc, "invalid floating point immediate");
5326   return MatchOperand_ParseFail;
5327 }
5328 
/// Parse an ARM instruction operand.  For now this parses the operand
/// regardless of the mnemonic.
5331 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5332   MCAsmParser &Parser = getParser();
5333   SMLoc S, E;
5334 
5335   // Check if the current operand has a custom associated parser, if so, try to
5336   // custom parse the operand, or fallback to the general approach.
5337   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5338   if (ResTy == MatchOperand_Success)
5339     return false;
5340   // If there wasn't a custom match, try the generic matcher below. Otherwise,
5341   // there was a match, but an error occurred, in which case, just return that
5342   // the operand parsing failed.
5343   if (ResTy == MatchOperand_ParseFail)
5344     return true;
5345 
5346   switch (getLexer().getKind()) {
5347   default:
5348     Error(Parser.getTok().getLoc(), "unexpected token in operand");
5349     return true;
5350   case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label.  This
    // is true even if the label is a register name.  So "b r1" means branch to
    // label "r1".
5354     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5355     if (!ExpectLabel) {
5356       if (!tryParseRegisterWithWriteBack(Operands))
5357         return false;
5358       int Res = tryParseShiftRegister(Operands);
5359       if (Res == 0) // success
5360         return false;
5361       else if (Res == -1) // irrecoverable error
5362         return true;
5363       // If this is VMRS, check for the apsr_nzcv operand.
5364       if (Mnemonic == "vmrs" &&
5365           Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5366         S = Parser.getTok().getLoc();
5367         Parser.Lex();
5368         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5369         return false;
5370       }
5371     }
5372 
    // Fall through for the Identifier case that is not a register or a
    // special name.
5375   }
5376   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as branch targets
5378   case AsmToken::String:  // quoted label names.
5379   case AsmToken::Dot: {   // . as a branch target
5380     // This was not a register so parse other operands that start with an
5381     // identifier (like labels) as expressions and create them as immediates.
5382     const MCExpr *IdVal;
5383     S = Parser.getTok().getLoc();
5384     if (getParser().parseExpression(IdVal))
5385       return true;
5386     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5387     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5388     return false;
5389   }
5390   case AsmToken::LBrac:
5391     return parseMemory(Operands);
5392   case AsmToken::LCurly:
5393     return parseRegisterList(Operands);
5394   case AsmToken::Dollar:
5395   case AsmToken::Hash: {
5396     // #42 -> immediate.
5397     S = Parser.getTok().getLoc();
5398     Parser.Lex();
5399 
5400     if (Parser.getTok().isNot(AsmToken::Colon)) {
5401       bool isNegative = Parser.getTok().is(AsmToken::Minus);
5402       const MCExpr *ImmVal;
5403       if (getParser().parseExpression(ImmVal))
5404         return true;
5405       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5406       if (CE) {
5407         int32_t Val = CE->getValue();
5408         if (isNegative && Val == 0)
5409           ImmVal = MCConstantExpr::create(INT32_MIN, getContext());
5410       }
5411       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5412       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5413 
5414       // There can be a trailing '!' on operands that we want as a separate
5415       // '!' Token operand. Handle that here. For example, the compatibility
5416       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5417       if (Parser.getTok().is(AsmToken::Exclaim)) {
5418         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5419                                                    Parser.getTok().getLoc()));
5420         Parser.Lex(); // Eat exclaim token
5421       }
5422       return false;
5423     }
5424     // w/ a ':' after the '#', it's just like a plain ':'.
5425     LLVM_FALLTHROUGH;
5426   }
5427   case AsmToken::Colon: {
5428     S = Parser.getTok().getLoc();
5429     // ":lower16:" and ":upper16:" expression prefixes
5430     // FIXME: Check it's an expression prefix,
5431     // e.g. (FOO - :lower16:BAR) isn't legal.
5432     ARMMCExpr::VariantKind RefKind;
5433     if (parsePrefix(RefKind))
5434       return true;
5435 
5436     const MCExpr *SubExprVal;
5437     if (getParser().parseExpression(SubExprVal))
5438       return true;
5439 
5440     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5441                                               getContext());
5442     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5443     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5444     return false;
5445   }
5446   case AsmToken::Equal: {
5447     S = Parser.getTok().getLoc();
5448     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5449       return Error(S, "unexpected token in operand");
5450     Parser.Lex(); // Eat '='
5451     const MCExpr *SubExprVal;
5452     if (getParser().parseExpression(SubExprVal))
5453       return true;
5454     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5455     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
5456     return false;
5457   }
5458   }
5459 }
5460 
// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
//  :lower16: or :upper16:.
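//  e.g. the ":lower16:" in "movw r0, #:lower16:_foo".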
5463 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5464   MCAsmParser &Parser = getParser();
5465   RefKind = ARMMCExpr::VK_ARM_None;
5466 
5467   // consume an optional '#' (GNU compatibility)
5468   if (getLexer().is(AsmToken::Hash))
5469     Parser.Lex();
5470 
5471   // :lower16: and :upper16: modifiers
5472   assert(getLexer().is(AsmToken::Colon) && "expected a :");
5473   Parser.Lex(); // Eat ':'
5474 
5475   if (getLexer().isNot(AsmToken::Identifier)) {
5476     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5477     return true;
5478   }
5479 
5480   enum {
5481     COFF = (1 << MCObjectFileInfo::IsCOFF),
5482     ELF = (1 << MCObjectFileInfo::IsELF),
5483     MACHO = (1 << MCObjectFileInfo::IsMachO)
5484   };
5485   static const struct PrefixEntry {
5486     const char *Spelling;
5487     ARMMCExpr::VariantKind VariantKind;
5488     uint8_t SupportedFormats;
5489   } PrefixEntries[] = {
5490     { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5491     { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5492   };
5493 
5494   StringRef IDVal = Parser.getTok().getIdentifier();
5495 
5496   const auto &Prefix =
5497       std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5498                    [&IDVal](const PrefixEntry &PE) {
5499                       return PE.Spelling == IDVal;
5500                    });
5501   if (Prefix == std::end(PrefixEntries)) {
5502     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5503     return true;
5504   }
5505 
5506   uint8_t CurrentFormat;
5507   switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5508   case MCObjectFileInfo::IsMachO:
5509     CurrentFormat = MACHO;
5510     break;
5511   case MCObjectFileInfo::IsELF:
5512     CurrentFormat = ELF;
5513     break;
5514   case MCObjectFileInfo::IsCOFF:
5515     CurrentFormat = COFF;
5516     break;
5517   }
5518 
5519   if (~Prefix->SupportedFormats & CurrentFormat) {
5520     Error(Parser.getTok().getLoc(),
5521           "cannot represent relocation in the current file format");
5522     return true;
5523   }
5524 
5525   RefKind = Prefix->VariantKind;
5526   Parser.Lex();
5527 
5528   if (getLexer().isNot(AsmToken::Colon)) {
5529     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5530     return true;
5531   }
5532   Parser.Lex(); // Eat the last ':'
5533 
5534   return false;
5535 }
5536 
5537 /// \brief Given a mnemonic, split out possible predication code and carry
5538 /// setting letters to form a canonical mnemonic and flags.
5539 //
5540 // FIXME: Would be nice to autogen this.
5541 // FIXME: This is a bit of a maze of special cases.
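// e.g. "addseq" splits into mnemonic "add", predication code EQ, and
// CarrySetting = true.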
5542 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5543                                       unsigned &PredicationCode,
5544                                       bool &CarrySetting,
5545                                       unsigned &ProcessorIMod,
5546                                       StringRef &ITMask) {
5547   PredicationCode = ARMCC::AL;
5548   CarrySetting = false;
5549   ProcessorIMod = 0;
5550 
5551   // Ignore some mnemonics we know aren't predicated forms.
5552   //
5553   // FIXME: Would be nice to autogen this.
5554   if ((Mnemonic == "movs" && isThumb()) ||
5555       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
5556       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
5557       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
5558       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
5559       Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
5560       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
5561       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
5562       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5563       Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5564       Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
5565       Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5566       Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5567       Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5568       Mnemonic == "bxns"  || Mnemonic == "blxns")
5569     return Mnemonic;
5570 
  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-setting suffix and so weren't caught above.
5573   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5574       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5575       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5576       Mnemonic != "sbcs" && Mnemonic != "rscs") {
5577     unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
5578       .Case("eq", ARMCC::EQ)
5579       .Case("ne", ARMCC::NE)
5580       .Case("hs", ARMCC::HS)
5581       .Case("cs", ARMCC::HS)
5582       .Case("lo", ARMCC::LO)
5583       .Case("cc", ARMCC::LO)
5584       .Case("mi", ARMCC::MI)
5585       .Case("pl", ARMCC::PL)
5586       .Case("vs", ARMCC::VS)
5587       .Case("vc", ARMCC::VC)
5588       .Case("hi", ARMCC::HI)
5589       .Case("ls", ARMCC::LS)
5590       .Case("ge", ARMCC::GE)
5591       .Case("lt", ARMCC::LT)
5592       .Case("gt", ARMCC::GT)
5593       .Case("le", ARMCC::LE)
5594       .Case("al", ARMCC::AL)
5595       .Default(~0U);
5596     if (CC != ~0U) {
5597       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5598       PredicationCode = CC;
5599     }
5600   }
5601 
5602   // Next, determine if we have a carry setting bit. We explicitly ignore all
5603   // the instructions we know end in 's'.
5604   if (Mnemonic.endswith("s") &&
5605       !(Mnemonic == "cps" || Mnemonic == "mls" ||
5606         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5607         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5608         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5609         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5610         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5611         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5612         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5613         Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5614         Mnemonic == "bxns" || Mnemonic == "blxns" ||
5615         (Mnemonic == "movs" && isThumb()))) {
5616     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5617     CarrySetting = true;
5618   }
5619 
5620   // The "cps" instruction can have a interrupt mode operand which is glued into
5621   // the mnemonic. Check if this is the case, split it and parse the imod op
5622   if (Mnemonic.startswith("cps")) {
5623     // Split out any imod code.
5624     unsigned IMod =
5625       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5626       .Case("ie", ARM_PROC::IE)
5627       .Case("id", ARM_PROC::ID)
5628       .Default(~0U);
5629     if (IMod != ~0U) {
5630       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5631       ProcessorIMod = IMod;
5632     }
5633   }
5634 
5635   // The "it" instruction has the condition mask on the end of the mnemonic.
5636   if (Mnemonic.startswith("it")) {
5637     ITMask = Mnemonic.slice(2, Mnemonic.size());
5638     Mnemonic = Mnemonic.slice(0, 2);
5639   }
5640 
5641   return Mnemonic;
5642 }
5643 
5644 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5645 /// inclusion of carry set or predication code operands.
5646 //
5647 // FIXME: It would be nice to autogen this.
5648 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5649                                          bool &CanAcceptCarrySet,
5650                                          bool &CanAcceptPredicationCode) {
5651   CanAcceptCarrySet =
5652       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5653       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5654       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5655       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5656       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5657       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5658       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5659       (!isThumb() &&
5660        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5661         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5662 
5663   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5664       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5665       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5666       Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5667       Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5668       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5669       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5670       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5671       Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5672       Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5673       (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
5674       Mnemonic == "vmovx" || Mnemonic == "vins") {
5675     // These mnemonics are never predicable
5676     CanAcceptPredicationCode = false;
5677   } else if (!isThumb()) {
5678     // Some instructions are only predicable in Thumb mode
5679     CanAcceptPredicationCode =
5680         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5681         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5682         Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
5683         Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
5684         Mnemonic != "ldc2" && Mnemonic != "ldc2l" && Mnemonic != "stc2" &&
5685         Mnemonic != "stc2l" && !Mnemonic.startswith("rfe") &&
5686         !Mnemonic.startswith("srs");
5687   } else if (isThumbOne()) {
5688     if (hasV6MOps())
5689       CanAcceptPredicationCode = Mnemonic != "movs";
5690     else
5691       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5692   } else
5693     CanAcceptPredicationCode = true;
5694 }
5695 
// \brief Some Thumb instructions have two-operand forms that are not
// available as three-operand forms; convert to two-operand form if possible.
5698 //
5699 // FIXME: We would really like to be able to tablegen'erate this.
5700 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
5701                                                  bool CarrySetting,
5702                                                  OperandVector &Operands) {
5703   if (Operands.size() != 6)
5704     return;
5705 
5706   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5707         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
5708   if (!Op3.isReg() || !Op4.isReg())
5709     return;
5710 
5711   auto Op3Reg = Op3.getReg();
5712   auto Op4Reg = Op4.getReg();
5713 
5714   // For most Thumb2 cases we just generate the 3 operand form and reduce
5715   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
5716   // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp, #imm' case.
5718   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
5719   if (isThumbTwo()) {
5720     if (Mnemonic != "add")
5721       return;
5722     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
5723                         (Op5.isReg() && Op5.getReg() == ARM::PC);
5724     if (!TryTransform) {
5725       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
5726                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
5727                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
5728                        Op5.isImm() && !Op5.isImm0_508s4());
5729     }
5730     if (!TryTransform)
5731       return;
5732   } else if (!isThumbOne())
5733     return;
5734 
5735   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
5736         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5737         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
5738         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
5739     return;
5740 
  // If the first two operands of a three-operand instruction are the same
  // then transform to the two-operand version of the same instruction,
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'.
5744   bool Transform = Op3Reg == Op4Reg;
5745 
  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrSP.
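  // e.g. in Thumb1, 'ands r0, r1, r0' can be rewritten as 'ands r0, r1' once
  // Op4 and Op5 are swapped.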
5749   const ARMOperand *LastOp = &Op5;
5750   bool Swap = false;
5751   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
5752       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
5753        Mnemonic == "and" || Mnemonic == "eor" ||
5754        Mnemonic == "adc" || Mnemonic == "orr")) {
5755     Swap = true;
5756     LastOp = &Op4;
5757     Transform = true;
5758   }
5759 
5760   // If both registers are the same then remove one of them from
5761   // the operand list, with certain exceptions.
5762   if (Transform) {
5763     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
5764     // 2 operand forms don't exist.
5765     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
5766         LastOp->isReg())
5767       Transform = false;
5768 
    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3 bits, because the ARM ARM says not to.
5771     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
5772       Transform = false;
5773   }
5774 
5775   if (Transform) {
5776     if (Swap)
5777       std::swap(Op4, Op5);
5778     Operands.erase(Operands.begin() + 3);
5779   }
5780 }
5781 
5782 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5783                                           OperandVector &Operands) {
5784   // FIXME: This is all horribly hacky. We really need a better way to deal
5785   // with optional operands like this in the matcher table.
5786 
5787   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5788   // another does not. Specifically, the MOVW instruction does not. So we
5789   // special case it here and remove the defaulted (non-setting) cc_out
5790   // operand if that's the instruction we're trying to match.
5791   //
5792   // We do this as post-processing of the explicit operands rather than just
5793   // conditionally adding the cc_out in the first place because we need
5794   // to check the type of the parsed immediate operand.
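  // e.g. in ARM state 'mov r0, #0x1234' is not a modified immediate, so it
  // can only be a MOVW, which has no cc_out operand.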
5795   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5796       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
5797       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5798       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5799     return true;
5800 
5801   // Register-register 'add' for thumb does not have a cc_out operand
5802   // when there are only two register operands.
5803   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5804       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5805       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5806       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5807     return true;
5808   // Register-register 'add' for thumb does not have a cc_out operand
5809   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5810   // have to check the immediate range here since Thumb2 has a variant
5811   // that can handle a different range and has a cc_out operand.
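  // e.g. 'add r1, sp, #4' matches the SP-relative form, which has no cc_out
  // operand.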
5812   if (((isThumb() && Mnemonic == "add") ||
5813        (isThumbTwo() && Mnemonic == "sub")) &&
5814       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5815       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5816       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5817       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5818       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5819        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5820     return true;
5821   // For Thumb2, add/sub immediate does not have a cc_out operand for the
5822   // imm0_4095 variant. That's the least-preferred variant when
5823   // selecting via the generic "add" mnemonic, so to know that we
5824   // should remove the cc_out operand, we have to explicitly check that
5825   // it's not one of the other variants. Ugh.
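  // e.g. 'add r0, r1, #0x999' outside an IT block only fits the 12-bit
  // immediate (T4) encoding, so its cc_out operand must be removed.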
5826   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5827       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5828       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5829       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5830     // Nest conditions rather than one big 'if' statement for readability.
5831     //
5832     // If both registers are low, we're in an IT block, and the immediate is
5833     // in range, we should use encoding T1 instead, which has a cc_out.
5834     if (inITBlock() &&
5835         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5836         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5837         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5838       return false;
5839     // Check against T3. If the second register is the PC, this is an
5840     // alternate form of ADR, which uses encoding T4, so check for that too.
5841     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5842         static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5843       return false;
5844 
5845     // Otherwise, we use encoding T4, which does not have a cc_out
5846     // operand.
5847     return true;
5848   }
5849 
5850   // The thumb2 multiply instruction doesn't have a CCOut register, so
5851   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5852   // use the 16-bit encoding or not.
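  // e.g. 'mul r0, r8, r0' involves a high register, so only the 32-bit
  // encoding is possible and the cc_out operand must be removed.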
5853   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5854       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5855       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5856       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5857       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5858       // If the registers aren't low regs, the destination reg isn't the
5859       // same as one of the source regs, or the cc_out operand is zero
5860       // outside of an IT block, we have to use the 32-bit encoding, so
5861       // remove the cc_out operand.
5862       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5863        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5864        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5865        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5866                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5867                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5868                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
5869     return true;
5870 
5871   // Also check the 'mul' syntax variant that doesn't specify an explicit
5872   // destination register.
5873   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5874       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5875       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5876       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
5878       // outside of an IT block, we have to use the 32-bit encoding, so
5879       // remove the cc_out operand.
5880       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5881        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5882        !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
5888   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5889   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5890   // right, this will result in better diagnostics (which operand is off)
5891   // anyway.
5892   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5893       (Operands.size() == 5 || Operands.size() == 6) &&
5894       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5895       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5896       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5897       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5898        (Operands.size() == 6 &&
5899         static_cast<ARMOperand &>(*Operands[5]).isImm())))
5900     return true;
5901 
5902   return false;
5903 }
5904 
5905 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5906                                               OperandVector &Operands) {
5907   // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
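  // e.g. 'vrintz.f32 d0, d1' selects the NEON form, so the default predicate
  // operand must be removed; 'vrintz.f32 s0, s1' is VFP and keeps it.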
5908   unsigned RegIdx = 3;
5909   if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5910       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5911        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5912     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5913         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5914          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5915       RegIdx = 4;
5916 
5917     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5918         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5919              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5920          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5921              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5922       return true;
5923   }
5924   return false;
5925 }
5926 
5927 static bool isDataTypeToken(StringRef Tok) {
5928   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5929     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5930     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5931     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5932     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5933     Tok == ".f" || Tok == ".d";
5934 }
5935 
5936 // FIXME: This bit should probably be handled via an explicit match class
5937 // in the .td files that matches the suffix instead of having it be
5938 // a literal string token the way it is now.
5939 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5940   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5941 }
5942 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5943                                  unsigned VariantID);
5944 
5945 static bool RequiresVFPRegListValidation(StringRef Inst,
5946                                          bool &AcceptSinglePrecisionOnly,
5947                                          bool &AcceptDoublePrecisionOnly) {
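  // The precision is encoded in the seventh character of the mnemonic, e.g.
  // 'fldmias sp!, {s0-s3}' may only use single-precision registers while
  // 'fldmiad' may only use double-precision ones.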
5948   if (Inst.size() < 7)
5949     return false;
5950 
5951   if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5952     StringRef AddressingMode = Inst.substr(4, 2);
5953     if (AddressingMode == "ia" || AddressingMode == "db" ||
5954         AddressingMode == "ea" || AddressingMode == "fd") {
5955       AcceptSinglePrecisionOnly = Inst[6] == 's';
5956       AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5957       return true;
5958     }
5959   }
5960 
5961   return false;
5962 }
5963 
5964 /// Parse an arm instruction mnemonic followed by its operands.
5965 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
5966                                     SMLoc NameLoc, OperandVector &Operands) {
5967   MCAsmParser &Parser = getParser();
5968   // FIXME: Can this be done via tablegen in some fashion?
5969   bool RequireVFPRegisterListCheck;
5970   bool AcceptSinglePrecisionOnly;
5971   bool AcceptDoublePrecisionOnly;
5972   RequireVFPRegisterListCheck =
5973     RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
5974                                  AcceptDoublePrecisionOnly);
5975 
5976   // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
5978   // The generic tblgen'erated code does this later, at the start of
5979   // MatchInstructionImpl(), but that's too late for aliases that include
5980   // any sort of suffix.
5981   uint64_t AvailableFeatures = getAvailableFeatures();
5982   unsigned AssemblerDialect = getParser().getAssemblerDialect();
5983   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
5984 
5985   // First check for the ARM-specific .req directive.
5986   if (Parser.getTok().is(AsmToken::Identifier) &&
5987       Parser.getTok().getIdentifier() == ".req") {
5988     parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
5991     return true;
5992   }
5993 
5994   // Create the leading tokens for the mnemonic, split by '.' characters.
5995   size_t Start = 0, Next = Name.find('.');
5996   StringRef Mnemonic = Name.slice(Start, Next);
5997 
5998   // Split out the predication code and carry setting flag from the mnemonic.
5999   unsigned PredicationCode;
6000   unsigned ProcessorIMod;
6001   bool CarrySetting;
6002   StringRef ITMask;
6003   Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
6004                            ProcessorIMod, ITMask);
6005 
6006   // In Thumb1, only the branch (B) instruction can be predicated.
6007   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6008     return Error(NameLoc, "conditional execution not supported in Thumb1");
6009   }
6010 
6011   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6012 
6013   // Handle the IT instruction ITMask. Convert it to a bitmask. This
6014   // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
6016   // where the conditional bit0 is zero, the instruction post-processing
6017   // will adjust the mask accordingly.
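  // e.g. for 'itte' the mask string here is "te", which the loop below turns
  // into Mask == 0b1010 ('t', 'e', then the terminating '1' bit).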
6018   if (Mnemonic == "it") {
6019     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
6020     if (ITMask.size() > 3) {
6021       return Error(Loc, "too many conditions on IT instruction");
6022     }
6023     unsigned Mask = 8;
6024     for (unsigned i = ITMask.size(); i != 0; --i) {
6025       char pos = ITMask[i - 1];
6026       if (pos != 't' && pos != 'e') {
6027         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
6028       }
6029       Mask >>= 1;
6030       if (ITMask[i - 1] == 't')
6031         Mask |= 8;
6032     }
6033     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
6034   }
6035 
6036   // FIXME: This is all a pretty gross hack. We should automatically handle
6037   // optional operands like this via tblgen.
6038 
6039   // Next, add the CCOut and ConditionCode operands, if needed.
6040   //
6041   // For mnemonics which can ever incorporate a carry setting bit or predication
6042   // code, our matching model involves us always generating CCOut and
6043   // ConditionCode operands to match the mnemonic "as written" and then we let
6044   // the matcher deal with finding the right instruction or generating an
6045   // appropriate error.
6046   bool CanAcceptCarrySet, CanAcceptPredicationCode;
6047   getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
6048 
6049   // If we had a carry-set on an instruction that can't do that, issue an
6050   // error.
6051   if (!CanAcceptCarrySet && CarrySetting) {
6052     return Error(NameLoc, "instruction '" + Mnemonic +
6053                  "' can not set flags, but 's' suffix specified");
6054   }
6055   // If we had a predication code on an instruction that can't do that, issue an
6056   // error.
6057   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
6058     return Error(NameLoc, "instruction '" + Mnemonic +
6059                  "' is not predicable, but condition code specified");
6060   }
6061 
6062   // Add the carry setting operand, if necessary.
6063   if (CanAcceptCarrySet) {
6064     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
6065     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
6066                                                Loc));
6067   }
6068 
6069   // Add the predication code operand, if necessary.
6070   if (CanAcceptPredicationCode) {
6071     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
6072                                       CarrySetting);
6073     Operands.push_back(ARMOperand::CreateCondCode(
6074                          ARMCC::CondCodes(PredicationCode), Loc));
6075   }
6076 
6077   // Add the processor imod operand, if necessary.
6078   if (ProcessorIMod) {
6079     Operands.push_back(ARMOperand::CreateImm(
6080           MCConstantExpr::create(ProcessorIMod, getContext()),
6081                                  NameLoc, NameLoc));
6082   } else if (Mnemonic == "cps" && isMClass()) {
6083     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
6084   }
6085 
6086   // Add the remaining tokens in the mnemonic.
6087   while (Next != StringRef::npos) {
6088     Start = Next;
6089     Next = Name.find('.', Start + 1);
6090     StringRef ExtraToken = Name.slice(Start, Next);
6091 
6092     // Some NEON instructions have an optional datatype suffix that is
6093     // completely ignored. Check for that.
6094     if (isDataTypeToken(ExtraToken) &&
6095         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
6096       continue;
6097 
    // For ARM mode, generate an error if the .n qualifier is used.
6099     if (ExtraToken == ".n" && !isThumb()) {
6100       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6101       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
6102                    "arm mode");
6103     }
6104 
6105     // The .n qualifier is always discarded as that is what the tables
6106     // and matcher expect.  In ARM mode the .w qualifier has no effect,
6107     // so discard it to avoid errors that can be caused by the matcher.
6108     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
6109       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6110       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
6111     }
6112   }
6113 
6114   // Read the remaining operands.
6115   if (getLexer().isNot(AsmToken::EndOfStatement)) {
6116     // Read the first operand.
6117     if (parseOperand(Operands, Mnemonic)) {
6118       return true;
6119     }
6120 
6121     while (getLexer().is(AsmToken::Comma)) {
6122       Parser.Lex();  // Eat the comma.
6123 
6124       // Parse and remember the operand.
6125       if (parseOperand(Operands, Mnemonic)) {
6126         return true;
6127       }
6128     }
6129   }
6130 
6131   if (getLexer().isNot(AsmToken::EndOfStatement)) {
6132     return TokError("unexpected token in argument list");
6133   }
6134 
6135   Parser.Lex(); // Consume the EndOfStatement
6136 
6137   if (RequireVFPRegisterListCheck) {
6138     ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
6139     if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
6140       return Error(Op.getStartLoc(),
6141                    "VFP/Neon single precision register expected");
6142     if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
6143       return Error(Op.getStartLoc(),
6144                    "VFP/Neon double precision register expected");
6145   }
6146 
6147   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
6148 
6149   // Some instructions, mostly Thumb, have forms for the same mnemonic that
6150   // do and don't have a cc_out optional-def operand. With some spot-checks
6151   // of the operand list, we can figure out which variant we're trying to
6152   // parse and adjust accordingly before actually matching. We shouldn't ever
6153   // try to remove a cc_out operand that was explicitly set on the
6154   // mnemonic, of course (CarrySetting == true). Reason number #317 the
6155   // table driven matcher doesn't fit well with the ARM instruction set.
6156   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
6157     Operands.erase(Operands.begin() + 1);
6158 
6159   // Some instructions have the same mnemonic, but don't always
6160   // have a predicate. Distinguish them here and delete the
6161   // predicate if needed.
6162   if (shouldOmitPredicateOperand(Mnemonic, Operands))
6163     Operands.erase(Operands.begin() + 1);
6164 
  // ARM mode 'blx' needs special handling, as the register operand version
6166   // is predicable, but the label operand version is not. So, we can't rely
6167   // on the Mnemonic based checking to correctly figure out when to put
6168   // a k_CondCode operand in the list. If we're trying to match the label
6169   // version, remove the k_CondCode operand here.
6170   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
6171       static_cast<ARMOperand &>(*Operands[2]).isImm())
6172     Operands.erase(Operands.begin() + 1);
6173 
6174   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
6175   // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
6176   // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
6178   // expressed as a GPRPair, so we have to manually merge them.
6179   // FIXME: We would really like to be able to tablegen'erate this.
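  // e.g. for 'ldrexd r0, r1, [r2]', r0 and r1 are folded into a single
  // GPRPair operand covering both registers.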
6180   if (!isThumb() && Operands.size() > 4 &&
6181       (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
6182        Mnemonic == "stlexd")) {
6183     bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
6184     unsigned Idx = isLoad ? 2 : 3;
6185     ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
6186     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
6187 
6188     const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
6189     // Adjust only if Op1 and Op2 are GPRs.
6190     if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
6191         MRC.contains(Op2.getReg())) {
6192       unsigned Reg1 = Op1.getReg();
6193       unsigned Reg2 = Op2.getReg();
6194       unsigned Rt = MRI->getEncodingValue(Reg1);
6195       unsigned Rt2 = MRI->getEncodingValue(Reg2);
6196 
6197       // Rt2 must be Rt + 1 and Rt must be even.
6198       if (Rt + 1 != Rt2 || (Rt & 1)) {
6199         Error(Op2.getStartLoc(), isLoad
6200                                      ? "destination operands must be sequential"
6201                                      : "source operands must be sequential");
6202         return true;
6203       }
6204       unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
6205           &(MRI->getRegClass(ARM::GPRPairRegClassID)));
6206       Operands[Idx] =
6207           ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
6208       Operands.erase(Operands.begin() + Idx + 1);
6209     }
6210   }
6211 
6212   // GNU Assembler extension (compatibility)
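  // e.g. 'ldrd r0, [r2]' is accepted and the implied second transfer
  // register (r1) is inserted, matching GAS behavior.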
6213   if ((Mnemonic == "ldrd" || Mnemonic == "strd")) {
6214     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6215     ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6216     if (Op3.isMem()) {
6217       assert(Op2.isReg() && "expected register argument");
6218 
6219       unsigned SuperReg = MRI->getMatchingSuperReg(
6220           Op2.getReg(), ARM::gsub_0, &MRI->getRegClass(ARM::GPRPairRegClassID));
6221 
6222       assert(SuperReg && "expected register pair");
6223 
6224       unsigned PairedReg = MRI->getSubReg(SuperReg, ARM::gsub_1);
6225 
6226       Operands.insert(
6227           Operands.begin() + 3,
6228           ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6229     }
6230   }
6231 
6232   // FIXME: As said above, this is all a pretty gross hack.  This instruction
6233   // does not fit with other "subs" and tblgen.
6234   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
6235   // so the Mnemonic is the original name "subs" and delete the predicate
6236   // operand so it will match the table entry.
6237   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
6238       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6239       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
6240       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6241       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
6242       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6243     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
6244     Operands.erase(Operands.begin() + 1);
6245   }
6246   return false;
6247 }
6248 
6249 // Validate context-sensitive operand constraints.
6250 
// Return 'true' if the register list contains non-low GPR registers (other
// than the optional HiReg), 'false' otherwise. If Reg is in the register
// list, set 'containsReg' to true.
6254 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6255                                  unsigned Reg, unsigned HiReg,
6256                                  bool &containsReg) {
6257   containsReg = false;
6258   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6259     unsigned OpReg = Inst.getOperand(i).getReg();
6260     if (OpReg == Reg)
6261       containsReg = true;
6262     // Anything other than a low register isn't legal here.
6263     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6264       return true;
6265   }
6266   return false;
6267 }
6268 
// Check if the specified register is in the register list of the inst,
6270 // starting at the indicated operand number.
6271 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6272   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6273     unsigned OpReg = Inst.getOperand(i).getReg();
6274     if (OpReg == Reg)
6275       return true;
6276   }
6277   return false;
6278 }
6279 
6280 // Return true if instruction has the interesting property of being
6281 // allowed in IT blocks, but not being predicable.
6282 static bool instIsBreakpoint(const MCInst &Inst) {
6283     return Inst.getOpcode() == ARM::tBKPT ||
6284            Inst.getOpcode() == ARM::BKPT ||
6285            Inst.getOpcode() == ARM::tHLT ||
           Inst.getOpcode() == ARM::HLT;
}
6289 
6290 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6291                                        const OperandVector &Operands,
6292                                        unsigned ListNo, bool IsARPop) {
6293   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6294   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6295 
6296   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6297   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6298   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6299 
6300   if (!IsARPop && ListContainsSP)
6301     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6302                  "SP may not be in the register list");
6303   else if (ListContainsPC && ListContainsLR)
6304     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6305                  "PC and LR may not be in the register list simultaneously");
6306   else if (inITBlock() && !lastInITBlock() && ListContainsPC)
6307     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6308                  "instruction must be outside of IT block or the last "
6309                  "instruction in an IT block");
6310   return false;
6311 }
6312 
6313 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6314                                        const OperandVector &Operands,
6315                                        unsigned ListNo) {
6316   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6317   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6318 
6319   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6320   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6321 
6322   if (ListContainsSP && ListContainsPC)
6323     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6324                  "SP and PC may not be in the register list");
6325   else if (ListContainsSP)
6326     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6327                  "SP may not be in the register list");
6328   else if (ListContainsPC)
6329     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6330                  "PC may not be in the register list");
6331   return false;
6332 }
6333 
6334 // FIXME: We would really like to be able to tablegen'erate this.
6335 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6336                                        const OperandVector &Operands) {
6337   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6338   SMLoc Loc = Operands[0]->getStartLoc();
6339 
6340   // Check the IT block state first.
6341   // NOTE: BKPT and HLT instructions have the interesting property of being
6342   // allowed in IT blocks, but not being predicable. They just always execute.
6343   if (inITBlock() && !instIsBreakpoint(Inst)) {
6344     // The instruction must be predicable.
6345     if (!MCID.isPredicable())
6346       return Error(Loc, "instructions in IT block must be predicable");
6347     unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
6348     if (Cond != currentITCond()) {
6349       // Find the condition code Operand to get its SMLoc information.
6350       SMLoc CondLoc;
6351       for (unsigned I = 1; I < Operands.size(); ++I)
6352         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6353           CondLoc = Operands[I]->getStartLoc();
6354       return Error(CondLoc, "incorrect condition in IT block; got '" +
6355                    StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
6356                    "', but expected '" +
6357                    ARMCondCodeToString(ARMCC::CondCodes(currentITCond())) + "'");
6358     }
6359   // Check for non-'al' condition codes outside of the IT block.
6360   } else if (isThumbTwo() && MCID.isPredicable() &&
6361              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6362              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6363              Inst.getOpcode() != ARM::t2Bcc) {
6364     return Error(Loc, "predicated instructions must be in IT block");
6365   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
6366              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6367                  ARMCC::AL) {
6368     return Warning(Loc, "predicated instructions should be in IT block");
6369   }
6370 
6371   const unsigned Opcode = Inst.getOpcode();
6372   switch (Opcode) {
6373   case ARM::LDRD:
6374   case ARM::LDRD_PRE:
6375   case ARM::LDRD_POST: {
6376     const unsigned RtReg = Inst.getOperand(0).getReg();
6377 
6378     // Rt can't be R14.
6379     if (RtReg == ARM::LR)
6380       return Error(Operands[3]->getStartLoc(),
6381                    "Rt can't be R14");
6382 
6383     const unsigned Rt = MRI->getEncodingValue(RtReg);
6384     // Rt must be even-numbered.
6385     if ((Rt & 1) == 1)
6386       return Error(Operands[3]->getStartLoc(),
6387                    "Rt must be even-numbered");
6388 
6389     // Rt2 must be Rt + 1.
6390     const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6391     if (Rt2 != Rt + 1)
6392       return Error(Operands[3]->getStartLoc(),
6393                    "destination operands must be sequential");
6394 
6395     if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
6396       const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6397       // For addressing modes with writeback, the base register needs to be
6398       // different from the destination registers.
6399       if (Rn == Rt || Rn == Rt2)
6400         return Error(Operands[3]->getStartLoc(),
6401                      "base register needs to be different from destination "
6402                      "registers");
6403     }
6404 
6405     return false;
6406   }
6407   case ARM::t2LDRDi8:
6408   case ARM::t2LDRD_PRE:
6409   case ARM::t2LDRD_POST: {
6410     // Rt2 must be different from Rt.
6411     unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6412     unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6413     if (Rt2 == Rt)
6414       return Error(Operands[3]->getStartLoc(),
6415                    "destination operands can't be identical");
6416     return false;
6417   }
6418   case ARM::t2BXJ: {
6419     const unsigned RmReg = Inst.getOperand(0).getReg();
6420     // Rm = SP is no longer unpredictable in v8-A
6421     if (RmReg == ARM::SP && !hasV8Ops())
6422       return Error(Operands[2]->getStartLoc(),
6423                    "r13 (SP) is an unpredictable operand to BXJ");
6424     return false;
6425   }
6426   case ARM::STRD: {
6427     // Rt2 must be Rt + 1.
6428     unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6429     unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6430     if (Rt2 != Rt + 1)
6431       return Error(Operands[3]->getStartLoc(),
6432                    "source operands must be sequential");
6433     return false;
6434   }
6435   case ARM::STRD_PRE:
6436   case ARM::STRD_POST: {
6437     // Rt2 must be Rt + 1.
6438     unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6439     unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6440     if (Rt2 != Rt + 1)
6441       return Error(Operands[3]->getStartLoc(),
6442                    "source operands must be sequential");
6443     return false;
6444   }
6445   case ARM::STR_PRE_IMM:
6446   case ARM::STR_PRE_REG:
6447   case ARM::STR_POST_IMM:
6448   case ARM::STR_POST_REG:
6449   case ARM::STRH_PRE:
6450   case ARM::STRH_POST:
6451   case ARM::STRB_PRE_IMM:
6452   case ARM::STRB_PRE_REG:
6453   case ARM::STRB_POST_IMM:
6454   case ARM::STRB_POST_REG: {
6455     // Rt must be different from Rn.
6456     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6457     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6458 
6459     if (Rt == Rn)
6460       return Error(Operands[3]->getStartLoc(),
6461                    "source register and base register can't be identical");
6462     return false;
6463   }
6464   case ARM::LDR_PRE_IMM:
6465   case ARM::LDR_PRE_REG:
6466   case ARM::LDR_POST_IMM:
6467   case ARM::LDR_POST_REG:
6468   case ARM::LDRH_PRE:
6469   case ARM::LDRH_POST:
6470   case ARM::LDRSH_PRE:
6471   case ARM::LDRSH_POST:
6472   case ARM::LDRB_PRE_IMM:
6473   case ARM::LDRB_PRE_REG:
6474   case ARM::LDRB_POST_IMM:
6475   case ARM::LDRB_POST_REG:
6476   case ARM::LDRSB_PRE:
6477   case ARM::LDRSB_POST: {
6478     // Rt must be different from Rn.
6479     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6480     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6481 
6482     if (Rt == Rn)
6483       return Error(Operands[3]->getStartLoc(),
6484                    "destination register and base register can't be identical");
6485     return false;
6486   }
6487   case ARM::SBFX:
6488   case ARM::UBFX: {
6489     // Width must be in range [1, 32-lsb].
6490     unsigned LSB = Inst.getOperand(2).getImm();
6491     unsigned Widthm1 = Inst.getOperand(3).getImm();
6492     if (Widthm1 >= 32 - LSB)
6493       return Error(Operands[5]->getStartLoc(),
6494                    "bitfield width must be in range [1,32-lsb]");
6495     return false;
6496   }
6497   // Notionally handles ARM::tLDMIA_UPD too.
6498   case ARM::tLDMIA: {
6499     // If we're parsing Thumb2, the .w variant is available and handles
6500     // most cases that are normally illegal for a Thumb1 LDM instruction.
6501     // We'll make the transformation in processInstruction() if necessary.
6502     //
6503     // Thumb LDM instructions are writeback iff the base register is not
6504     // in the register list.
6505     unsigned Rn = Inst.getOperand(0).getReg();
6506     bool HasWritebackToken =
6507         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6508          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
6509     bool ListContainsBase;
6510     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
6511       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
6512                    "registers must be in range r0-r7");
6513     // If we should have writeback, then there should be a '!' token.
6514     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
6515       return Error(Operands[2]->getStartLoc(),
6516                    "writeback operator '!' expected");
6517     // If we should not have writeback, there must not be a '!'. This is
6518     // true even for the 32-bit wide encodings.
6519     if (ListContainsBase && HasWritebackToken)
6520       return Error(Operands[3]->getStartLoc(),
6521                    "writeback operator '!' not allowed when base register "
6522                    "in register list");
6523 
6524     if (validatetLDMRegList(Inst, Operands, 3))
6525       return true;
6526     break;
6527   }
6528   case ARM::LDMIA_UPD:
6529   case ARM::LDMDB_UPD:
6530   case ARM::LDMIB_UPD:
6531   case ARM::LDMDA_UPD:
6532     // ARM variants loading and updating the same register are only officially
6533     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
6534     if (!hasV7Ops())
6535       break;
6536     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6537       return Error(Operands.back()->getStartLoc(),
6538                    "writeback register not allowed in register list");
6539     break;
6540   case ARM::t2LDMIA:
6541   case ARM::t2LDMDB:
6542     if (validatetLDMRegList(Inst, Operands, 3))
6543       return true;
6544     break;
6545   case ARM::t2STMIA:
6546   case ARM::t2STMDB:
6547     if (validatetSTMRegList(Inst, Operands, 3))
6548       return true;
6549     break;
6550   case ARM::t2LDMIA_UPD:
6551   case ARM::t2LDMDB_UPD:
6552   case ARM::t2STMIA_UPD:
6553   case ARM::t2STMDB_UPD: {
6554     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6555       return Error(Operands.back()->getStartLoc(),
6556                    "writeback register not allowed in register list");
6557 
6558     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
6559       if (validatetLDMRegList(Inst, Operands, 3))
6560         return true;
6561     } else {
6562       if (validatetSTMRegList(Inst, Operands, 3))
6563         return true;
6564     }
6565     break;
6566   }
6567   case ARM::sysLDMIA_UPD:
6568   case ARM::sysLDMDA_UPD:
6569   case ARM::sysLDMDB_UPD:
6570   case ARM::sysLDMIB_UPD:
6571     if (!listContainsReg(Inst, 3, ARM::PC))
6572       return Error(Operands[4]->getStartLoc(),
6573                    "writeback register only allowed on system LDM "
6574                    "if PC in register-list");
6575     break;
6576   case ARM::sysSTMIA_UPD:
6577   case ARM::sysSTMDA_UPD:
6578   case ARM::sysSTMDB_UPD:
6579   case ARM::sysSTMIB_UPD:
6580     return Error(Operands[2]->getStartLoc(),
6581                  "system STM cannot have writeback register");
6582   case ARM::tMUL: {
6583     // The second source operand must be the same register as the destination
6584     // operand.
6585     //
6586     // In this case, we must directly check the parsed operands because the
6587     // cvtThumbMultiply() function is written in such a way that it guarantees
6588     // this first statement is always true for the new Inst.  Essentially, the
6589     // destination is unconditionally copied into the second source operand
6590     // without checking to see if it matches what we actually parsed.
6591     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
6592                                  ((ARMOperand &)*Operands[5]).getReg()) &&
6593         (((ARMOperand &)*Operands[3]).getReg() !=
6594          ((ARMOperand &)*Operands[4]).getReg())) {
6595       return Error(Operands[3]->getStartLoc(),
6596                    "destination register must match source register");
6597     }
6598     break;
6599   }
  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
6601   // so only issue a diagnostic for thumb1. The instructions will be
6602   // switched to the t2 encodings in processInstruction() if necessary.
6603   case ARM::tPOP: {
6604     bool ListContainsBase;
6605     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
6606         !isThumbTwo())
6607       return Error(Operands[2]->getStartLoc(),
6608                    "registers must be in range r0-r7 or pc");
6609     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
6610       return true;
6611     break;
6612   }
6613   case ARM::tPUSH: {
6614     bool ListContainsBase;
6615     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
6616         !isThumbTwo())
6617       return Error(Operands[2]->getStartLoc(),
6618                    "registers must be in range r0-r7 or lr");
6619     if (validatetSTMRegList(Inst, Operands, 2))
6620       return true;
6621     break;
6622   }
6623   case ARM::tSTMIA_UPD: {
6624     bool ListContainsBase, InvalidLowList;
6625     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
6626                                           0, ListContainsBase);
6627     if (InvalidLowList && !isThumbTwo())
6628       return Error(Operands[4]->getStartLoc(),
6629                    "registers must be in range r0-r7");
6630 
6631     // This would be converted to a 32-bit stm, but that's not valid if the
6632     // writeback register is in the list.
6633     if (InvalidLowList && ListContainsBase)
6634       return Error(Operands[4]->getStartLoc(),
6635                    "writeback operator '!' not allowed when base register "
6636                    "in register list");
6637 
6638     if (validatetSTMRegList(Inst, Operands, 4))
6639       return true;
6640     break;
6641   }
6642   case ARM::tADDrSP: {
6643     // If the non-SP source operand and the destination operand are not the
6644     // same, we need thumb2 (for the wide encoding), or we have an error.
6645     if (!isThumbTwo() &&
6646         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
6647       return Error(Operands[4]->getStartLoc(),
6648                    "source register must be the same as destination");
6649     }
6650     break;
6651   }
6652   // Final range checking for Thumb unconditional branch instructions.
6653   case ARM::tB:
6654     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
6655       return Error(Operands[2]->getStartLoc(), "branch target out of range");
6656     break;
6657   case ARM::t2B: {
6658     int op = (Operands[2]->isImm()) ? 2 : 3;
6659     if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
6660       return Error(Operands[op]->getStartLoc(), "branch target out of range");
6661     break;
6662   }
6663   // Final range checking for Thumb conditional branch instructions.
6664   case ARM::tBcc:
6665     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
6666       return Error(Operands[2]->getStartLoc(), "branch target out of range");
6667     break;
6668   case ARM::t2Bcc: {
6669     int Op = (Operands[2]->isImm()) ? 2 : 3;
6670     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
6671       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
6672     break;
6673   }
6674   case ARM::tCBZ:
6675   case ARM::tCBNZ: {
6676     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
6677       return Error(Operands[2]->getStartLoc(), "branch target out of range");
6678     break;
6679   }
6680   case ARM::MOVi16:
6681   case ARM::t2MOVi16:
6682   case ARM::t2MOVTi16:
6683     {
6684     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
6685     // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16: as part of the expression.  We don't
6687     // want the behavior of silently truncating, which can be unexpected and
6688     // lead to bugs that are difficult to find since this is an easy mistake
6689     // to make.
6690     int i = (Operands[3]->isImm()) ? 3 : 4;
6691     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
6692     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6693     if (CE) break;
6694     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6695     if (!E) break;
6696     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6697     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
6698                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
6699       return Error(
6700           Op.getStartLoc(),
6701           "immediate expression for mov requires :lower16: or :upper16");
6702     break;
6703   }
6704   case ARM::HINT:
6705   case ARM::t2HINT: {
6706     if (hasRAS()) {
6707       // ESB is not predicable (pred must be AL)
6708       unsigned Imm8 = Inst.getOperand(0).getImm();
6709       unsigned Pred = Inst.getOperand(1).getImm();
6710       if (Imm8 == 0x10 && Pred != ARMCC::AL)
6711         return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
6712                                                  "predicable, but condition "
6713                                                  "code specified");
6714     }
6715     // Without the RAS extension, this behaves as any other unallocated hint.
6716     break;
6717   }
6718   }
6719 
6720   return false;
6721 }
6722 
6723 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
6724   switch(Opc) {
6725   default: llvm_unreachable("unexpected opcode!");
6726   // VST1LN
6727   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
6728   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6729   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6730   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
6731   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6732   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6733   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
6734   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6735   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6736 
6737   // VST2LN
6738   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
6739   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6740   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6741   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6742   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6743 
6744   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
6745   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6746   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6747   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6748   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6749 
6750   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
6751   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6752   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6753   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6754   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6755 
6756   // VST3LN
6757   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
6758   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6759   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6760   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6761   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6762   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
6763   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6764   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6765   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6766   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6767   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
6768   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6769   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6770   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6771   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6772 
6773   // VST3
6774   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
6775   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6776   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6777   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
6778   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6779   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6780   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
6781   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6782   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6783   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
6784   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6785   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6786   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
6787   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6788   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6789   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
6790   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6791   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6792 
6793   // VST4LN
6794   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
6795   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6796   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6797   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6798   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6799   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
6800   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6801   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6802   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6803   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6804   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
6805   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6806   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6807   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6808   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6809 
6810   // VST4
6811   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
6812   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6813   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6814   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
6815   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6816   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6817   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
6818   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6819   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6820   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
6821   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6822   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6823   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
6824   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6825   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6826   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
6827   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6828   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6829   }
6830 }
6831 
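// Map a VLD*-family assembly pseudo opcode to the real MC opcode, reporting
// the register spacing through Spacing (1 = consecutive D registers,
// 2 = every other D register, as used by the q-register forms).
// Usage sketch:
//   unsigned Spacing;
//   unsigned RealOpc = getRealVLDOpcode(ARM::VLD3LNdAsm_8, Spacing);
//   // RealOpc == ARM::VLD3LNd8, Spacing == 1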
6832 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6833   switch(Opc) {
6834   default: llvm_unreachable("unexpected opcode!");
6835   // VLD1LN
6836   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6837   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6838   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6839   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6840   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6841   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6842   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
6843   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6844   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6845 
6846   // VLD2LN
6847   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6848   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6849   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6850   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6851   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6852   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6853   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6854   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6855   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6856   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6857   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
6858   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6859   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6860   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6861   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6862 
6863   // VLD3DUP
6864   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6865   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6866   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6867   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6868   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6869   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6870   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6871   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6872   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6873   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6874   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6875   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6876   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
6877   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6878   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6879   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6880   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6881   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6882 
6883   // VLD3LN
6884   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6885   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6886   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6887   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6888   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6889   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6890   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6891   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6892   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6893   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6894   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
6895   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6896   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6897   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6898   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6899 
6900   // VLD3
6901   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6902   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6903   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6904   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6905   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6906   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6907   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6908   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6909   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6910   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6911   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6912   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6913   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
6914   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6915   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6916   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
6917   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6918   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6919 
6920   // VLD4LN
6921   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6922   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6923   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6924   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6925   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6926   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6927   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6928   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6929   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6930   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6931   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
6932   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6933   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6934   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6935   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6936 
6937   // VLD4DUP
6938   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6939   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6940   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6941   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6942   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6943   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6944   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6945   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6946   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6947   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6948   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6949   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6950   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
6951   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6952   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6953   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6954   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6955   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6956 
6957   // VLD4
6958   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6959   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6960   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6961   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6962   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6963   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6964   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6965   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6966   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6967   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6968   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6969   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6970   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
6971   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6972   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6973   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
6974   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6975   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6976   }
6977 }
6978 
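// Post-process a successfully matched instruction: expand assembly pseudos
// (the LDRT/STRT aliases, ADR written as an ADD from PC, "ldr rt, =imm",
// and the NEON VLD/VST complex aliases above) into real instructions with
// operands in the order the encoder expects. Returns true if Inst was
// rewritten, so the caller can re-process the result; returns false if no
// transformation applies.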
6979 bool ARMAsmParser::processInstruction(MCInst &Inst,
6980                                       const OperandVector &Operands,
6981                                       MCStreamer &Out) {
6982   switch (Inst.getOpcode()) {
6983   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
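  // For example, "ldrt r0, [r1]" (the form without a trailing offset) is
  // rewritten here into LDRT_POST_IMM with a zero register and zero
  // immediate standing in for the post-index offset, which in effect
  // behaves like "ldrt r0, [r1], #0".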
6984   case ARM::LDRT_POST:
6985   case ARM::LDRBT_POST: {
6986     const unsigned Opcode =
6987       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6988                                            : ARM::LDRBT_POST_IMM;
6989     MCInst TmpInst;
6990     TmpInst.setOpcode(Opcode);
6991     TmpInst.addOperand(Inst.getOperand(0));
6992     TmpInst.addOperand(Inst.getOperand(1));
6993     TmpInst.addOperand(Inst.getOperand(1));
6994     TmpInst.addOperand(MCOperand::createReg(0));
6995     TmpInst.addOperand(MCOperand::createImm(0));
6996     TmpInst.addOperand(Inst.getOperand(2));
6997     TmpInst.addOperand(Inst.getOperand(3));
6998     Inst = TmpInst;
6999     return true;
7000   }
7001   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
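  // Same rewrite as the LDRT/LDRBT case above; note that the store form
  // defines the write-back register first, so Rn (operand 1) is emitted
  // ahead of Rt (operand 0).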
7002   case ARM::STRT_POST:
7003   case ARM::STRBT_POST: {
7004     const unsigned Opcode =
7005       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
7006                                            : ARM::STRBT_POST_IMM;
7007     MCInst TmpInst;
7008     TmpInst.setOpcode(Opcode);
7009     TmpInst.addOperand(Inst.getOperand(1));
7010     TmpInst.addOperand(Inst.getOperand(0));
7011     TmpInst.addOperand(Inst.getOperand(1));
7012     TmpInst.addOperand(MCOperand::createReg(0));
7013     TmpInst.addOperand(MCOperand::createImm(0));
7014     TmpInst.addOperand(Inst.getOperand(2));
7015     TmpInst.addOperand(Inst.getOperand(3));
7016     Inst = TmpInst;
7017     return true;
7018   }
7019   // Alias for alternate form of 'ADR Rd, #imm' instruction.
7020   case ARM::ADDri: {
7021     if (Inst.getOperand(1).getReg() != ARM::PC ||
7022         Inst.getOperand(5).getReg() != 0 ||
7023         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
7024       return false;
7025     MCInst TmpInst;
7026     TmpInst.setOpcode(ARM::ADR);
7027     TmpInst.addOperand(Inst.getOperand(0));
7028     if (Inst.getOperand(2).isImm()) {
7029       // The immediate (mod_imm) is still in its encoded form; decode it
7030       // before passing it to the ADR instruction.
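      // Worked example: an encoded value of 0x010 is 0x10 with rotation 0,
      // so rotr32(0x10, 0) == 16 and "add r0, pc, #16" becomes an ADR with
      // immediate 16.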
7031       unsigned Enc = Inst.getOperand(2).getImm();
7032       TmpInst.addOperand(MCOperand::createImm(
7033         ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
7034     } else {
7035       // Turn the PC-relative expression into an absolute expression.
7036       // Reading the PC yields the address of the current instruction + 8,
7037       // so the transform to adr must be biased by that amount.
7038       MCSymbol *Dot = getContext().createTempSymbol();
7039       Out.EmitLabel(Dot);
7040       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
7041       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
7042                                                      MCSymbolRefExpr::VK_None,
7043                                                      getContext());
7044       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
7045       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
7046                                                      getContext());
7047       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
7048                                                         getContext());
7049       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
7050     }
7051     TmpInst.addOperand(Inst.getOperand(3));
7052     TmpInst.addOperand(Inst.getOperand(4));
7053     Inst = TmpInst;
7054     return true;
7055   }
7056   // Aliases for alternate PC+imm syntax of LDR instructions.
7057   case ARM::t2LDRpcrel:
7058     // Select the narrow version if the immediate will fit.
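    // (A positive offset of at most 0xff with no explicit ".w" width
    // qualifier selects the 16-bit tLDRpci; anything else keeps the 32-bit
    // t2LDRpci.)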
7059     if (Inst.getOperand(1).getImm() > 0 &&
7060         Inst.getOperand(1).getImm() <= 0xff &&
7061         !(static_cast<ARMOperand &>(*Operands[2]).isToken() &&
7062           static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w"))
7063       Inst.setOpcode(ARM::tLDRpci);
7064     else
7065       Inst.setOpcode(ARM::t2LDRpci);
7066     return true;
7067   case ARM::t2LDRBpcrel:
7068     Inst.setOpcode(ARM::t2LDRBpci);
7069     return true;
7070   case ARM::t2LDRHpcrel:
7071     Inst.setOpcode(ARM::t2LDRHpci);
7072     return true;
7073   case ARM::t2LDRSBpcrel:
7074     Inst.setOpcode(ARM::t2LDRSBpci);
7075     return true;
7076   case ARM::t2LDRSHpcrel:
7077     Inst.setOpcode(ARM::t2LDRSHpci);
7078     return true;
7079   case ARM::LDRConstPool:
7080   case ARM::tLDRConstPool:
7081   case ARM::t2LDRConstPool: {
7082     // The pseudo-instruction "ldr rt, =immediate" is converted to a
7083     // "MOV rt, immediate" if the immediate is known and representable;
7084     // otherwise we create a constant pool entry and load from it.
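    // Illustrative expansions:
    //   ldr r0, =0x1        -> mov r0, #1      (SOImm-encodable)
    //   ldr r0, =0xffffff00 -> mvn r0, #0xff   (inverse is SOImm-encodable)
    //   ldr r0, =0x12345678 -> PC-relative load from a new constant pool entry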
7085     MCInst TmpInst;
7086     if (Inst.getOpcode() == ARM::LDRConstPool)
7087       TmpInst.setOpcode(ARM::LDRi12);
7088     else if (Inst.getOpcode() == ARM::tLDRConstPool)
7089       TmpInst.setOpcode(ARM::tLDRpci);
7090     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
7091       TmpInst.setOpcode(ARM::t2LDRpci);
7092     const ARMOperand &PoolOperand =
7093       (static_cast<ARMOperand &>(*Operands[2]).isToken() &&
7094        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".w") ?
7095       static_cast<ARMOperand &>(*Operands[4]) :
7096       static_cast<ARMOperand &>(*Operands[3]);
7097     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
7098     // If SubExprVal is a constant, we may be able to use a MOV instead.
7099     if (isa<MCConstantExpr>(SubExprVal) &&
7100         Inst.getOperand(0).getReg() != ARM::PC &&
7101         Inst.getOperand(0).getReg() != ARM::SP) {
7102       int64_t Value =
7103         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7104       bool UseMov  = true;
7105       bool MovHasS = true;
7106       if (Inst.getOpcode() == ARM::LDRConstPool) {
7107         // ARM Constant
7108         if (ARM_AM::getSOImmVal(Value) != -1) {
7109           Value = ARM_AM::getSOImmVal(Value);
7110           TmpInst.setOpcode(ARM::MOVi);
7111         }
7112         else if (ARM_AM::getSOImmVal(~Value) != -1) {
7113           Value = ARM_AM::getSOImmVal(~Value);
7114           TmpInst.setOpcode(ARM::MVNi);
7115         }
7116         else if (hasV6T2Ops() &&
7117                  Value >= 0 && Value < 65536) {
7118           TmpInst.setOpcode(ARM::MOVi16);
7119           MovHasS = false;
7120         }
7121         else
7122           UseMov = false;
7123       }
7124       else {
7125         // Thumb/Thumb2 Constant
7126         if (hasThumb2() &&
7127             ARM_AM::getT2SOImmVal(Value) != -1)
7128           TmpInst.setOpcode(ARM::t2MOVi);
7129         else if (hasThumb2() &&
7130                  ARM_AM::getT2SOImmVal(~Value) != -1) {
7131           TmpInst.setOpcode(ARM::t2MVNi);
7132           Value = ~Value;
7133         }
7134         else if (hasV8MBaseline() &&
7135                  Value >= 0 && Value < 65536) {
7136           TmpInst.setOpcode(ARM::t2MOVi16);
7137           MovHasS = false;
7138         }
7139         else
7140           UseMov = false;
7141       }
7142       if (UseMov) {
7143         TmpInst.addOperand(Inst.getOperand(0));           // Rt
7144         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
7145         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7146         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7147         if (MovHasS)
7148           TmpInst.addOperand(MCOperand::createReg(0));    // S
7149         Inst = TmpInst;
7150         return true;
7151       }
7152     }
7153     // No opportunity to use MOV/MVN; create a constant pool entry instead.
7154     const MCExpr *CPLoc =
7155       getTargetStreamer().addConstantPoolEntry(SubExprVal,
7156                                                PoolOperand.getStartLoc());
7157     TmpInst.addOperand(Inst.getOperand(0));           // Rt
7158     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7159     if (TmpInst.getOpcode() == ARM::LDRi12)
7160       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
7161     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7162     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7163     Inst = TmpInst;
7164     return true;
7165   }
7166   // Handle NEON VST complex aliases.
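  // Common pattern for the NEON alias rewrites below: the "_Asm" pseudo only
  // names the first register (Vd); the real instruction needs the full
  // register list, which is rebuilt from Vd using the Spacing value returned
  // by getRealVSTOpcode/getRealVLDOpcode. E.g. with Vd = d4 the extra
  // operands are d5, d6, ... for Spacing 1 and d6, d8, ... for Spacing 2.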
7167   case ARM::VST1LNdWB_register_Asm_8:
7168   case ARM::VST1LNdWB_register_Asm_16:
7169   case ARM::VST1LNdWB_register_Asm_32: {
7170     MCInst TmpInst;
7171     // Shuffle the operands around so the lane index operand is in the
7172     // right place.
7173     unsigned Spacing;
7174     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7175     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7176     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7177     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7178     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7179     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7180     TmpInst.addOperand(Inst.getOperand(1)); // lane
7181     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7182     TmpInst.addOperand(Inst.getOperand(6));
7183     Inst = TmpInst;
7184     return true;
7185   }
7186 
7187   case ARM::VST2LNdWB_register_Asm_8:
7188   case ARM::VST2LNdWB_register_Asm_16:
7189   case ARM::VST2LNdWB_register_Asm_32:
7190   case ARM::VST2LNqWB_register_Asm_16:
7191   case ARM::VST2LNqWB_register_Asm_32: {
7192     MCInst TmpInst;
7193     // Shuffle the operands around so the lane index operand is in the
7194     // right place.
7195     unsigned Spacing;
7196     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7197     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7198     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7199     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7200     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7201     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7202     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7203                                             Spacing));
7204     TmpInst.addOperand(Inst.getOperand(1)); // lane
7205     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7206     TmpInst.addOperand(Inst.getOperand(6));
7207     Inst = TmpInst;
7208     return true;
7209   }
7210 
7211   case ARM::VST3LNdWB_register_Asm_8:
7212   case ARM::VST3LNdWB_register_Asm_16:
7213   case ARM::VST3LNdWB_register_Asm_32:
7214   case ARM::VST3LNqWB_register_Asm_16:
7215   case ARM::VST3LNqWB_register_Asm_32: {
7216     MCInst TmpInst;
7217     // Shuffle the operands around so the lane index operand is in the
7218     // right place.
7219     unsigned Spacing;
7220     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7221     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7222     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7223     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7224     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7225     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7226     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7227                                             Spacing));
7228     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7229                                             Spacing * 2));
7230     TmpInst.addOperand(Inst.getOperand(1)); // lane
7231     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7232     TmpInst.addOperand(Inst.getOperand(6));
7233     Inst = TmpInst;
7234     return true;
7235   }
7236 
7237   case ARM::VST4LNdWB_register_Asm_8:
7238   case ARM::VST4LNdWB_register_Asm_16:
7239   case ARM::VST4LNdWB_register_Asm_32:
7240   case ARM::VST4LNqWB_register_Asm_16:
7241   case ARM::VST4LNqWB_register_Asm_32: {
7242     MCInst TmpInst;
7243     // Shuffle the operands around so the lane index operand is in the
7244     // right place.
7245     unsigned Spacing;
7246     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7247     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7248     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7249     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7250     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7251     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7252     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7253                                             Spacing));
7254     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7255                                             Spacing * 2));
7256     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7257                                             Spacing * 3));
7258     TmpInst.addOperand(Inst.getOperand(1)); // lane
7259     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7260     TmpInst.addOperand(Inst.getOperand(6));
7261     Inst = TmpInst;
7262     return true;
7263   }
7264 
7265   case ARM::VST1LNdWB_fixed_Asm_8:
7266   case ARM::VST1LNdWB_fixed_Asm_16:
7267   case ARM::VST1LNdWB_fixed_Asm_32: {
7268     MCInst TmpInst;
7269     // Shuffle the operands around so the lane index operand is in the
7270     // right place.
7271     unsigned Spacing;
7272     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7273     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7274     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7275     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7276     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7277     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7278     TmpInst.addOperand(Inst.getOperand(1)); // lane
7279     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7280     TmpInst.addOperand(Inst.getOperand(5));
7281     Inst = TmpInst;
7282     return true;
7283   }
7284 
7285   case ARM::VST2LNdWB_fixed_Asm_8:
7286   case ARM::VST2LNdWB_fixed_Asm_16:
7287   case ARM::VST2LNdWB_fixed_Asm_32:
7288   case ARM::VST2LNqWB_fixed_Asm_16:
7289   case ARM::VST2LNqWB_fixed_Asm_32: {
7290     MCInst TmpInst;
7291     // Shuffle the operands around so the lane index operand is in the
7292     // right place.
7293     unsigned Spacing;
7294     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7295     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7296     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7297     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7298     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7299     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7300     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7301                                             Spacing));
7302     TmpInst.addOperand(Inst.getOperand(1)); // lane
7303     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7304     TmpInst.addOperand(Inst.getOperand(5));
7305     Inst = TmpInst;
7306     return true;
7307   }
7308 
7309   case ARM::VST3LNdWB_fixed_Asm_8:
7310   case ARM::VST3LNdWB_fixed_Asm_16:
7311   case ARM::VST3LNdWB_fixed_Asm_32:
7312   case ARM::VST3LNqWB_fixed_Asm_16:
7313   case ARM::VST3LNqWB_fixed_Asm_32: {
7314     MCInst TmpInst;
7315     // Shuffle the operands around so the lane index operand is in the
7316     // right place.
7317     unsigned Spacing;
7318     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7319     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7320     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7321     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7322     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7323     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7324     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7325                                             Spacing));
7326     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7327                                             Spacing * 2));
7328     TmpInst.addOperand(Inst.getOperand(1)); // lane
7329     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7330     TmpInst.addOperand(Inst.getOperand(5));
7331     Inst = TmpInst;
7332     return true;
7333   }
7334 
7335   case ARM::VST4LNdWB_fixed_Asm_8:
7336   case ARM::VST4LNdWB_fixed_Asm_16:
7337   case ARM::VST4LNdWB_fixed_Asm_32:
7338   case ARM::VST4LNqWB_fixed_Asm_16:
7339   case ARM::VST4LNqWB_fixed_Asm_32: {
7340     MCInst TmpInst;
7341     // Shuffle the operands around so the lane index operand is in the
7342     // right place.
7343     unsigned Spacing;
7344     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7345     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7346     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7347     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7348     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7349     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7350     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7351                                             Spacing));
7352     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7353                                             Spacing * 2));
7354     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7355                                             Spacing * 3));
7356     TmpInst.addOperand(Inst.getOperand(1)); // lane
7357     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7358     TmpInst.addOperand(Inst.getOperand(5));
7359     Inst = TmpInst;
7360     return true;
7361   }
7362 
7363   case ARM::VST1LNdAsm_8:
7364   case ARM::VST1LNdAsm_16:
7365   case ARM::VST1LNdAsm_32: {
7366     MCInst TmpInst;
7367     // Shuffle the operands around so the lane index operand is in the
7368     // right place.
7369     unsigned Spacing;
7370     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7371     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7372     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7373     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7374     TmpInst.addOperand(Inst.getOperand(1)); // lane
7375     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7376     TmpInst.addOperand(Inst.getOperand(5));
7377     Inst = TmpInst;
7378     return true;
7379   }
7380 
7381   case ARM::VST2LNdAsm_8:
7382   case ARM::VST2LNdAsm_16:
7383   case ARM::VST2LNdAsm_32:
7384   case ARM::VST2LNqAsm_16:
7385   case ARM::VST2LNqAsm_32: {
7386     MCInst TmpInst;
7387     // Shuffle the operands around so the lane index operand is in the
7388     // right place.
7389     unsigned Spacing;
7390     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7391     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7392     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7393     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7394     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7395                                             Spacing));
7396     TmpInst.addOperand(Inst.getOperand(1)); // lane
7397     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7398     TmpInst.addOperand(Inst.getOperand(5));
7399     Inst = TmpInst;
7400     return true;
7401   }
7402 
7403   case ARM::VST3LNdAsm_8:
7404   case ARM::VST3LNdAsm_16:
7405   case ARM::VST3LNdAsm_32:
7406   case ARM::VST3LNqAsm_16:
7407   case ARM::VST3LNqAsm_32: {
7408     MCInst TmpInst;
7409     // Shuffle the operands around so the lane index operand is in the
7410     // right place.
7411     unsigned Spacing;
7412     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7413     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7414     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7415     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7416     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7417                                             Spacing));
7418     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7419                                             Spacing * 2));
7420     TmpInst.addOperand(Inst.getOperand(1)); // lane
7421     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7422     TmpInst.addOperand(Inst.getOperand(5));
7423     Inst = TmpInst;
7424     return true;
7425   }
7426 
7427   case ARM::VST4LNdAsm_8:
7428   case ARM::VST4LNdAsm_16:
7429   case ARM::VST4LNdAsm_32:
7430   case ARM::VST4LNqAsm_16:
7431   case ARM::VST4LNqAsm_32: {
7432     MCInst TmpInst;
7433     // Shuffle the operands around so the lane index operand is in the
7434     // right place.
7435     unsigned Spacing;
7436     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7437     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7438     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7439     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7440     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7441                                             Spacing));
7442     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7443                                             Spacing * 2));
7444     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7445                                             Spacing * 3));
7446     TmpInst.addOperand(Inst.getOperand(1)); // lane
7447     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7448     TmpInst.addOperand(Inst.getOperand(5));
7449     Inst = TmpInst;
7450     return true;
7451   }
7452 
7453   // Handle NEON VLD complex aliases.
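  // The VLD aliases are shuffled the same way as the VST aliases above,
  // except that the destination registers are added up front and the lane
  // forms also carry tied source operands (Vd and the spaced registers
  // repeated after the addressing operands).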
7454   case ARM::VLD1LNdWB_register_Asm_8:
7455   case ARM::VLD1LNdWB_register_Asm_16:
7456   case ARM::VLD1LNdWB_register_Asm_32: {
7457     MCInst TmpInst;
7458     // Shuffle the operands around so the lane index operand is in the
7459     // right place.
7460     unsigned Spacing;
7461     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7462     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7463     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7464     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7465     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7466     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7467     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7468     TmpInst.addOperand(Inst.getOperand(1)); // lane
7469     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7470     TmpInst.addOperand(Inst.getOperand(6));
7471     Inst = TmpInst;
7472     return true;
7473   }
7474 
7475   case ARM::VLD2LNdWB_register_Asm_8:
7476   case ARM::VLD2LNdWB_register_Asm_16:
7477   case ARM::VLD2LNdWB_register_Asm_32:
7478   case ARM::VLD2LNqWB_register_Asm_16:
7479   case ARM::VLD2LNqWB_register_Asm_32: {
7480     MCInst TmpInst;
7481     // Shuffle the operands around so the lane index operand is in the
7482     // right place.
7483     unsigned Spacing;
7484     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7485     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7486     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7487                                             Spacing));
7488     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7489     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7490     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7491     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7492     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7493     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7494                                             Spacing));
7495     TmpInst.addOperand(Inst.getOperand(1)); // lane
7496     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7497     TmpInst.addOperand(Inst.getOperand(6));
7498     Inst = TmpInst;
7499     return true;
7500   }
7501 
7502   case ARM::VLD3LNdWB_register_Asm_8:
7503   case ARM::VLD3LNdWB_register_Asm_16:
7504   case ARM::VLD3LNdWB_register_Asm_32:
7505   case ARM::VLD3LNqWB_register_Asm_16:
7506   case ARM::VLD3LNqWB_register_Asm_32: {
7507     MCInst TmpInst;
7508     // Shuffle the operands around so the lane index operand is in the
7509     // right place.
7510     unsigned Spacing;
7511     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7512     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7513     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7514                                             Spacing));
7515     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7516                                             Spacing * 2));
7517     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7518     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7519     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7520     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7521     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7522     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7523                                             Spacing));
7524     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7525                                             Spacing * 2));
7526     TmpInst.addOperand(Inst.getOperand(1)); // lane
7527     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7528     TmpInst.addOperand(Inst.getOperand(6));
7529     Inst = TmpInst;
7530     return true;
7531   }
7532 
7533   case ARM::VLD4LNdWB_register_Asm_8:
7534   case ARM::VLD4LNdWB_register_Asm_16:
7535   case ARM::VLD4LNdWB_register_Asm_32:
7536   case ARM::VLD4LNqWB_register_Asm_16:
7537   case ARM::VLD4LNqWB_register_Asm_32: {
7538     MCInst TmpInst;
7539     // Shuffle the operands around so the lane index operand is in the
7540     // right place.
7541     unsigned Spacing;
7542     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7543     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7544     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7545                                             Spacing));
7546     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7547                                             Spacing * 2));
7548     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7549                                             Spacing * 3));
7550     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7551     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7552     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7553     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7554     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7555     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7556                                             Spacing));
7557     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7558                                             Spacing * 2));
7559     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7560                                             Spacing * 3));
7561     TmpInst.addOperand(Inst.getOperand(1)); // lane
7562     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7563     TmpInst.addOperand(Inst.getOperand(6));
7564     Inst = TmpInst;
7565     return true;
7566   }
7567 
7568   case ARM::VLD1LNdWB_fixed_Asm_8:
7569   case ARM::VLD1LNdWB_fixed_Asm_16:
7570   case ARM::VLD1LNdWB_fixed_Asm_32: {
7571     MCInst TmpInst;
7572     // Shuffle the operands around so the lane index operand is in the
7573     // right place.
7574     unsigned Spacing;
7575     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7576     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7577     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7578     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7579     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7580     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7581     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7582     TmpInst.addOperand(Inst.getOperand(1)); // lane
7583     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7584     TmpInst.addOperand(Inst.getOperand(5));
7585     Inst = TmpInst;
7586     return true;
7587   }
7588 
7589   case ARM::VLD2LNdWB_fixed_Asm_8:
7590   case ARM::VLD2LNdWB_fixed_Asm_16:
7591   case ARM::VLD2LNdWB_fixed_Asm_32:
7592   case ARM::VLD2LNqWB_fixed_Asm_16:
7593   case ARM::VLD2LNqWB_fixed_Asm_32: {
7594     MCInst TmpInst;
7595     // Shuffle the operands around so the lane index operand is in the
7596     // right place.
7597     unsigned Spacing;
7598     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7599     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7600     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7601                                             Spacing));
7602     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7603     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7604     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7605     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7606     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7607     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7608                                             Spacing));
7609     TmpInst.addOperand(Inst.getOperand(1)); // lane
7610     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7611     TmpInst.addOperand(Inst.getOperand(5));
7612     Inst = TmpInst;
7613     return true;
7614   }
7615 
7616   case ARM::VLD3LNdWB_fixed_Asm_8:
7617   case ARM::VLD3LNdWB_fixed_Asm_16:
7618   case ARM::VLD3LNdWB_fixed_Asm_32:
7619   case ARM::VLD3LNqWB_fixed_Asm_16:
7620   case ARM::VLD3LNqWB_fixed_Asm_32: {
7621     MCInst TmpInst;
7622     // Shuffle the operands around so the lane index operand is in the
7623     // right place.
7624     unsigned Spacing;
7625     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7626     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7627     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7628                                             Spacing));
7629     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7630                                             Spacing * 2));
7631     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7632     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7633     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7634     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7635     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7636     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7637                                             Spacing));
7638     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7639                                             Spacing * 2));
7640     TmpInst.addOperand(Inst.getOperand(1)); // lane
7641     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7642     TmpInst.addOperand(Inst.getOperand(5));
7643     Inst = TmpInst;
7644     return true;
7645   }
7646 
7647   case ARM::VLD4LNdWB_fixed_Asm_8:
7648   case ARM::VLD4LNdWB_fixed_Asm_16:
7649   case ARM::VLD4LNdWB_fixed_Asm_32:
7650   case ARM::VLD4LNqWB_fixed_Asm_16:
7651   case ARM::VLD4LNqWB_fixed_Asm_32: {
7652     MCInst TmpInst;
7653     // Shuffle the operands around so the lane index operand is in the
7654     // right place.
7655     unsigned Spacing;
7656     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7657     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7658     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7659                                             Spacing));
7660     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7661                                             Spacing * 2));
7662     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7663                                             Spacing * 3));
7664     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7665     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7666     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7667     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7668     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7669     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7670                                             Spacing));
7671     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7672                                             Spacing * 2));
7673     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7674                                             Spacing * 3));
7675     TmpInst.addOperand(Inst.getOperand(1)); // lane
7676     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7677     TmpInst.addOperand(Inst.getOperand(5));
7678     Inst = TmpInst;
7679     return true;
7680   }
7681 
7682   case ARM::VLD1LNdAsm_8:
7683   case ARM::VLD1LNdAsm_16:
7684   case ARM::VLD1LNdAsm_32: {
7685     MCInst TmpInst;
7686     // Shuffle the operands around so the lane index operand is in the
7687     // right place.
7688     unsigned Spacing;
7689     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7690     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7691     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7692     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7693     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7694     TmpInst.addOperand(Inst.getOperand(1)); // lane
7695     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7696     TmpInst.addOperand(Inst.getOperand(5));
7697     Inst = TmpInst;
7698     return true;
7699   }
7700 
7701   case ARM::VLD2LNdAsm_8:
7702   case ARM::VLD2LNdAsm_16:
7703   case ARM::VLD2LNdAsm_32:
7704   case ARM::VLD2LNqAsm_16:
7705   case ARM::VLD2LNqAsm_32: {
7706     MCInst TmpInst;
7707     // Shuffle the operands around so the lane index operand is in the
7708     // right place.
7709     unsigned Spacing;
7710     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7711     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7712     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7713                                             Spacing));
7714     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7715     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7716     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7717     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7718                                             Spacing));
7719     TmpInst.addOperand(Inst.getOperand(1)); // lane
7720     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7721     TmpInst.addOperand(Inst.getOperand(5));
7722     Inst = TmpInst;
7723     return true;
7724   }
7725 
7726   case ARM::VLD3LNdAsm_8:
7727   case ARM::VLD3LNdAsm_16:
7728   case ARM::VLD3LNdAsm_32:
7729   case ARM::VLD3LNqAsm_16:
7730   case ARM::VLD3LNqAsm_32: {
7731     MCInst TmpInst;
7732     // Shuffle the operands around so the lane index operand is in the
7733     // right place.
7734     unsigned Spacing;
7735     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7736     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7737     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7738                                             Spacing));
7739     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7740                                             Spacing * 2));
7741     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7742     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7743     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7744     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7745                                             Spacing));
7746     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7747                                             Spacing * 2));
7748     TmpInst.addOperand(Inst.getOperand(1)); // lane
7749     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7750     TmpInst.addOperand(Inst.getOperand(5));
7751     Inst = TmpInst;
7752     return true;
7753   }
7754 
7755   case ARM::VLD4LNdAsm_8:
7756   case ARM::VLD4LNdAsm_16:
7757   case ARM::VLD4LNdAsm_32:
7758   case ARM::VLD4LNqAsm_16:
7759   case ARM::VLD4LNqAsm_32: {
7760     MCInst TmpInst;
7761     // Shuffle the operands around so the lane index operand is in the
7762     // right place.
7763     unsigned Spacing;
7764     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7765     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7766     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7767                                             Spacing));
7768     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7769                                             Spacing * 2));
7770     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7771                                             Spacing * 3));
7772     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7773     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7774     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7775     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7776                                             Spacing));
7777     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7778                                             Spacing * 2));
7779     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7780                                             Spacing * 3));
7781     TmpInst.addOperand(Inst.getOperand(1)); // lane
7782     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7783     TmpInst.addOperand(Inst.getOperand(5));
7784     Inst = TmpInst;
7785     return true;
7786   }
7787 
7788   // VLD3DUP single 3-element structure to all lanes instructions.
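  // e.g. "vld3.8 {d0[], d1[], d2[]}, [r1]" (Spacing 1) or
  //      "vld3.8 {d0[], d2[], d4[]}, [r1]" (Spacing 2).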
7789   case ARM::VLD3DUPdAsm_8:
7790   case ARM::VLD3DUPdAsm_16:
7791   case ARM::VLD3DUPdAsm_32:
7792   case ARM::VLD3DUPqAsm_8:
7793   case ARM::VLD3DUPqAsm_16:
7794   case ARM::VLD3DUPqAsm_32: {
7795     MCInst TmpInst;
7796     unsigned Spacing;
7797     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7798     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7799     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7800                                             Spacing));
7801     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7802                                             Spacing * 2));
7803     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7804     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7805     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7806     TmpInst.addOperand(Inst.getOperand(4));
7807     Inst = TmpInst;
7808     return true;
7809   }
7810 
7811   case ARM::VLD3DUPdWB_fixed_Asm_8:
7812   case ARM::VLD3DUPdWB_fixed_Asm_16:
7813   case ARM::VLD3DUPdWB_fixed_Asm_32:
7814   case ARM::VLD3DUPqWB_fixed_Asm_8:
7815   case ARM::VLD3DUPqWB_fixed_Asm_16:
7816   case ARM::VLD3DUPqWB_fixed_Asm_32: {
7817     MCInst TmpInst;
7818     unsigned Spacing;
7819     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7820     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7821     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7822                                             Spacing));
7823     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7824                                             Spacing * 2));
7825     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7826     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7827     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7828     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7829     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7830     TmpInst.addOperand(Inst.getOperand(4));
7831     Inst = TmpInst;
7832     return true;
7833   }
7834 
7835   case ARM::VLD3DUPdWB_register_Asm_8:
7836   case ARM::VLD3DUPdWB_register_Asm_16:
7837   case ARM::VLD3DUPdWB_register_Asm_32:
7838   case ARM::VLD3DUPqWB_register_Asm_8:
7839   case ARM::VLD3DUPqWB_register_Asm_16:
7840   case ARM::VLD3DUPqWB_register_Asm_32: {
7841     MCInst TmpInst;
7842     unsigned Spacing;
7843     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7844     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7845     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7846                                             Spacing));
7847     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7848                                             Spacing * 2));
7849     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7850     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7851     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7852     TmpInst.addOperand(Inst.getOperand(3)); // Rm
7853     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7854     TmpInst.addOperand(Inst.getOperand(5));
7855     Inst = TmpInst;
7856     return true;
7857   }
7858 
7859   // VLD3 multiple 3-element structure instructions.
7860   case ARM::VLD3dAsm_8:
7861   case ARM::VLD3dAsm_16:
7862   case ARM::VLD3dAsm_32:
7863   case ARM::VLD3qAsm_8:
7864   case ARM::VLD3qAsm_16:
7865   case ARM::VLD3qAsm_32: {
7866     MCInst TmpInst;
7867     unsigned Spacing;
7868     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7869     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7870     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7871                                             Spacing));
7872     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7873                                             Spacing * 2));
7874     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7875     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7876     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7877     TmpInst.addOperand(Inst.getOperand(4));
7878     Inst = TmpInst;
7879     return true;
7880   }
7881 
7882   case ARM::VLD3dWB_fixed_Asm_8:
7883   case ARM::VLD3dWB_fixed_Asm_16:
7884   case ARM::VLD3dWB_fixed_Asm_32:
7885   case ARM::VLD3qWB_fixed_Asm_8:
7886   case ARM::VLD3qWB_fixed_Asm_16:
7887   case ARM::VLD3qWB_fixed_Asm_32: {
7888     MCInst TmpInst;
7889     unsigned Spacing;
7890     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7891     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7892     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7893                                             Spacing));
7894     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7895                                             Spacing * 2));
7896     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7897     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7898     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7899     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7900     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7901     TmpInst.addOperand(Inst.getOperand(4));
7902     Inst = TmpInst;
7903     return true;
7904   }
7905 
7906   case ARM::VLD3dWB_register_Asm_8:
7907   case ARM::VLD3dWB_register_Asm_16:
7908   case ARM::VLD3dWB_register_Asm_32:
7909   case ARM::VLD3qWB_register_Asm_8:
7910   case ARM::VLD3qWB_register_Asm_16:
7911   case ARM::VLD3qWB_register_Asm_32: {
7912     MCInst TmpInst;
7913     unsigned Spacing;
7914     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7915     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7916     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7917                                             Spacing));
7918     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7919                                             Spacing * 2));
7920     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7921     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7922     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7923     TmpInst.addOperand(Inst.getOperand(3)); // Rm
7924     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7925     TmpInst.addOperand(Inst.getOperand(5));
7926     Inst = TmpInst;
7927     return true;
7928   }
7929 
7930   // VLD4DUP single 4-element structure to all lanes instructions.
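       // E.g. "vld4.8 {d0[], d1[], d2[], d3[]}, [r1]" loads one 4-element
       // structure and replicates it into every lane of the listed registers.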
7931   case ARM::VLD4DUPdAsm_8:
7932   case ARM::VLD4DUPdAsm_16:
7933   case ARM::VLD4DUPdAsm_32:
7934   case ARM::VLD4DUPqAsm_8:
7935   case ARM::VLD4DUPqAsm_16:
7936   case ARM::VLD4DUPqAsm_32: {
7937     MCInst TmpInst;
7938     unsigned Spacing;
7939     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7940     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7941     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7942                                             Spacing));
7943     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7944                                             Spacing * 2));
7945     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7946                                             Spacing * 3));
7947     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7948     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7949     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7950     TmpInst.addOperand(Inst.getOperand(4));
7951     Inst = TmpInst;
7952     return true;
7953   }
7954 
7955   case ARM::VLD4DUPdWB_fixed_Asm_8:
7956   case ARM::VLD4DUPdWB_fixed_Asm_16:
7957   case ARM::VLD4DUPdWB_fixed_Asm_32:
7958   case ARM::VLD4DUPqWB_fixed_Asm_8:
7959   case ARM::VLD4DUPqWB_fixed_Asm_16:
7960   case ARM::VLD4DUPqWB_fixed_Asm_32: {
7961     MCInst TmpInst;
7962     unsigned Spacing;
7963     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7964     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7965     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7966                                             Spacing));
7967     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7968                                             Spacing * 2));
7969     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7970                                             Spacing * 3));
7971     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7972     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7973     TmpInst.addOperand(Inst.getOperand(2)); // alignment
7974     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7975     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
7976     TmpInst.addOperand(Inst.getOperand(4));
7977     Inst = TmpInst;
7978     return true;
7979   }
7980 
7981   case ARM::VLD4DUPdWB_register_Asm_8:
7982   case ARM::VLD4DUPdWB_register_Asm_16:
7983   case ARM::VLD4DUPdWB_register_Asm_32:
7984   case ARM::VLD4DUPqWB_register_Asm_8:
7985   case ARM::VLD4DUPqWB_register_Asm_16:
7986   case ARM::VLD4DUPqWB_register_Asm_32: {
7987     MCInst TmpInst;
7988     unsigned Spacing;
7989     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7990     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7991     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7992                                             Spacing));
7993     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7994                                             Spacing * 2));
7995     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7996                                             Spacing * 3));
7997     TmpInst.addOperand(Inst.getOperand(1)); // Rn
7998     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
7999     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8000     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8001     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8002     TmpInst.addOperand(Inst.getOperand(5));
8003     Inst = TmpInst;
8004     return true;
8005   }
8006 
8007   // VLD4 multiple 4-element structure instructions.
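       // Same expansion as the VLD3 cases above, with a fourth register at
       // Spacing * 3.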
8008   case ARM::VLD4dAsm_8:
8009   case ARM::VLD4dAsm_16:
8010   case ARM::VLD4dAsm_32:
8011   case ARM::VLD4qAsm_8:
8012   case ARM::VLD4qAsm_16:
8013   case ARM::VLD4qAsm_32: {
8014     MCInst TmpInst;
8015     unsigned Spacing;
8016     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8017     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8018     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8019                                             Spacing));
8020     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8021                                             Spacing * 2));
8022     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8023                                             Spacing * 3));
8024     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8025     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8026     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8027     TmpInst.addOperand(Inst.getOperand(4));
8028     Inst = TmpInst;
8029     return true;
8030   }
8031 
8032   case ARM::VLD4dWB_fixed_Asm_8:
8033   case ARM::VLD4dWB_fixed_Asm_16:
8034   case ARM::VLD4dWB_fixed_Asm_32:
8035   case ARM::VLD4qWB_fixed_Asm_8:
8036   case ARM::VLD4qWB_fixed_Asm_16:
8037   case ARM::VLD4qWB_fixed_Asm_32: {
8038     MCInst TmpInst;
8039     unsigned Spacing;
8040     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8041     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8042     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8043                                             Spacing));
8044     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8045                                             Spacing * 2));
8046     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8047                                             Spacing * 3));
8048     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8049     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8050     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8051     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8052     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8053     TmpInst.addOperand(Inst.getOperand(4));
8054     Inst = TmpInst;
8055     return true;
8056   }
8057 
8058   case ARM::VLD4dWB_register_Asm_8:
8059   case ARM::VLD4dWB_register_Asm_16:
8060   case ARM::VLD4dWB_register_Asm_32:
8061   case ARM::VLD4qWB_register_Asm_8:
8062   case ARM::VLD4qWB_register_Asm_16:
8063   case ARM::VLD4qWB_register_Asm_32: {
8064     MCInst TmpInst;
8065     unsigned Spacing;
8066     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8067     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8068     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8069                                             Spacing));
8070     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8071                                             Spacing * 2));
8072     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8073                                             Spacing * 3));
8074     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8075     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8076     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8077     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8078     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8079     TmpInst.addOperand(Inst.getOperand(5));
8080     Inst = TmpInst;
8081     return true;
8082   }
8083 
8084   // VST3 multiple 3-element structure instructions.
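       // Unlike the VLD expansions, the real VST instructions take the address
       // operands (Rn, alignment, and any writeback/Rm operands) first and the
       // source register list last.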
8085   case ARM::VST3dAsm_8:
8086   case ARM::VST3dAsm_16:
8087   case ARM::VST3dAsm_32:
8088   case ARM::VST3qAsm_8:
8089   case ARM::VST3qAsm_16:
8090   case ARM::VST3qAsm_32: {
8091     MCInst TmpInst;
8092     unsigned Spacing;
8093     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8094     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8095     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8096     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8097     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8098                                             Spacing));
8099     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8100                                             Spacing * 2));
8101     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8102     TmpInst.addOperand(Inst.getOperand(4));
8103     Inst = TmpInst;
8104     return true;
8105   }
8106 
8107   case ARM::VST3dWB_fixed_Asm_8:
8108   case ARM::VST3dWB_fixed_Asm_16:
8109   case ARM::VST3dWB_fixed_Asm_32:
8110   case ARM::VST3qWB_fixed_Asm_8:
8111   case ARM::VST3qWB_fixed_Asm_16:
8112   case ARM::VST3qWB_fixed_Asm_32: {
8113     MCInst TmpInst;
8114     unsigned Spacing;
8115     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8116     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8117     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8118     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8119     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8120     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8121     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8122                                             Spacing));
8123     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8124                                             Spacing * 2));
8125     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8126     TmpInst.addOperand(Inst.getOperand(4));
8127     Inst = TmpInst;
8128     return true;
8129   }
8130 
8131   case ARM::VST3dWB_register_Asm_8:
8132   case ARM::VST3dWB_register_Asm_16:
8133   case ARM::VST3dWB_register_Asm_32:
8134   case ARM::VST3qWB_register_Asm_8:
8135   case ARM::VST3qWB_register_Asm_16:
8136   case ARM::VST3qWB_register_Asm_32: {
8137     MCInst TmpInst;
8138     unsigned Spacing;
8139     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8140     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8141     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8142     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8143     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8144     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8145     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8146                                             Spacing));
8147     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8148                                             Spacing * 2));
8149     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8150     TmpInst.addOperand(Inst.getOperand(5));
8151     Inst = TmpInst;
8152     return true;
8153   }
8154 
8155   // VST4 multiple 4-element structure instructions.
8156   case ARM::VST4dAsm_8:
8157   case ARM::VST4dAsm_16:
8158   case ARM::VST4dAsm_32:
8159   case ARM::VST4qAsm_8:
8160   case ARM::VST4qAsm_16:
8161   case ARM::VST4qAsm_32: {
8162     MCInst TmpInst;
8163     unsigned Spacing;
8164     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8165     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8166     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8167     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8168     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8169                                             Spacing));
8170     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8171                                             Spacing * 2));
8172     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8173                                             Spacing * 3));
8174     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8175     TmpInst.addOperand(Inst.getOperand(4));
8176     Inst = TmpInst;
8177     return true;
8178   }
8179 
8180   case ARM::VST4dWB_fixed_Asm_8:
8181   case ARM::VST4dWB_fixed_Asm_16:
8182   case ARM::VST4dWB_fixed_Asm_32:
8183   case ARM::VST4qWB_fixed_Asm_8:
8184   case ARM::VST4qWB_fixed_Asm_16:
8185   case ARM::VST4qWB_fixed_Asm_32: {
8186     MCInst TmpInst;
8187     unsigned Spacing;
8188     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8189     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8190     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8191     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8192     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8193     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8194     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8195                                             Spacing));
8196     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8197                                             Spacing * 2));
8198     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8199                                             Spacing * 3));
8200     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8201     TmpInst.addOperand(Inst.getOperand(4));
8202     Inst = TmpInst;
8203     return true;
8204   }
8205 
8206   case ARM::VST4dWB_register_Asm_8:
8207   case ARM::VST4dWB_register_Asm_16:
8208   case ARM::VST4dWB_register_Asm_32:
8209   case ARM::VST4qWB_register_Asm_8:
8210   case ARM::VST4qWB_register_Asm_16:
8211   case ARM::VST4qWB_register_Asm_32: {
8212     MCInst TmpInst;
8213     unsigned Spacing;
8214     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8215     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8216     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8217     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8218     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8219     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8220     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8221                                             Spacing));
8222     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8223                                             Spacing * 2));
8224     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8225                                             Spacing * 3));
8226     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8227     TmpInst.addOperand(Inst.getOperand(5));
8228     Inst = TmpInst;
8229     return true;
8230   }
8231 
8232   // Handle encoding choice for the shift-immediate instructions.
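       // Prefer the 16-bit Thumb1 encoding when the registers are low, the
       // flag-setting behaviour matches the IT block state, and the user
       // didn't force the wide encoding with ".w" -- e.g. "lsls r0, r0, #2"
       // outside an IT block can use tLSLri.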
8233   case ARM::t2LSLri:
8234   case ARM::t2LSRri:
8235   case ARM::t2ASRri: {
8236     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8237         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8238         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8239         !(static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8240           static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w")) {
8241       unsigned NewOpc;
8242       switch (Inst.getOpcode()) {
8243       default: llvm_unreachable("unexpected opcode");
8244       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
8245       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
8246       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
8247       }
8248       // The Thumb1 operands aren't in the same order. Awesome, eh?
8249       MCInst TmpInst;
8250       TmpInst.setOpcode(NewOpc);
8251       TmpInst.addOperand(Inst.getOperand(0));
8252       TmpInst.addOperand(Inst.getOperand(5));
8253       TmpInst.addOperand(Inst.getOperand(1));
8254       TmpInst.addOperand(Inst.getOperand(2));
8255       TmpInst.addOperand(Inst.getOperand(3));
8256       TmpInst.addOperand(Inst.getOperand(4));
8257       Inst = TmpInst;
8258       return true;
8259     }
8260     return false;
8261   }
8262 
8263   // Handle the Thumb2 mode MOV complex aliases.
8264   case ARM::t2MOVsr:
8265   case ARM::t2MOVSsr: {
8266     // Which instruction to expand to depends on the CCOut operand and
8267     // whether we're in an IT block if the register operands are low
8268     // registers.
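         // E.g. "movs r0, r0, lsl r1" outside an IT block can use the 16-bit
         // tLSLrr encoding; high registers or the wrong flag-setting form for
         // the IT state require the 32-bit t2LSLrr.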
8269     bool isNarrow = false;
8270     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8271         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8272         isARMLowRegister(Inst.getOperand(2).getReg()) &&
8273         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8274         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
8275       isNarrow = true;
8276     MCInst TmpInst;
8277     unsigned newOpc;
8278     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
8279     default: llvm_unreachable("unexpected opcode!");
8280     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
8281     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
8282     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
8283     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
8284     }
8285     TmpInst.setOpcode(newOpc);
8286     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8287     if (isNarrow)
8288       TmpInst.addOperand(MCOperand::createReg(
8289           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8290     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8291     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8292     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8293     TmpInst.addOperand(Inst.getOperand(5));
8294     if (!isNarrow)
8295       TmpInst.addOperand(MCOperand::createReg(
8296           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8297     Inst = TmpInst;
8298     return true;
8299   }
8300   case ARM::t2MOVsi:
8301   case ARM::t2MOVSsi: {
8302     // Which instruction to expand to depends on the CCOut operand and
8303     // whether we're in an IT block if the register operands are low
8304     // registers.
8305     bool isNarrow = false;
8306     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8307         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8308         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
8309       isNarrow = true;
8310     MCInst TmpInst;
8311     unsigned newOpc;
8312     switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
8313     default: llvm_unreachable("unexpected opcode!");
8314     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
8315     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
8316     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
8317     case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
8318     case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
8319     }
8320     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
8321     if (Amount == 32) Amount = 0;
8322     TmpInst.setOpcode(newOpc);
8323     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8324     if (isNarrow)
8325       TmpInst.addOperand(MCOperand::createReg(
8326           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8327     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8328     if (newOpc != ARM::t2RRX)
8329       TmpInst.addOperand(MCOperand::createImm(Amount));
8330     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8331     TmpInst.addOperand(Inst.getOperand(4));
8332     if (!isNarrow)
8333       TmpInst.addOperand(MCOperand::createReg(
8334           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8335     Inst = TmpInst;
8336     return true;
8337   }
8338   // Handle the ARM mode MOV complex aliases.
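       // "asr/lsr/lsl/ror Rd, Rn, Rm" and the immediate forms below are
       // aliases for a MOV with a shifted-register (or shifted-immediate)
       // operand, so rewrite them as MOVsr/MOVsi here.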
8339   case ARM::ASRr:
8340   case ARM::LSRr:
8341   case ARM::LSLr:
8342   case ARM::RORr: {
8343     ARM_AM::ShiftOpc ShiftTy;
8344     switch(Inst.getOpcode()) {
8345     default: llvm_unreachable("unexpected opcode!");
8346     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
8347     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
8348     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
8349     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
8350     }
8351     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
8352     MCInst TmpInst;
8353     TmpInst.setOpcode(ARM::MOVsr);
8354     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8355     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8356     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8357     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8358     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8359     TmpInst.addOperand(Inst.getOperand(4));
8360     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8361     Inst = TmpInst;
8362     return true;
8363   }
8364   case ARM::ASRi:
8365   case ARM::LSRi:
8366   case ARM::LSLi:
8367   case ARM::RORi: {
8368     ARM_AM::ShiftOpc ShiftTy;
8369     switch(Inst.getOpcode()) {
8370     default: llvm_unreachable("unexpected opcode!");
8371     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
8372     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
8373     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
8374     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
8375     }
8376     // A shift by zero is a plain MOVr, not a MOVsi.
8377     unsigned Amt = Inst.getOperand(2).getImm();
8378     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
8379     // A shift by 32 should be encoded as 0 when permitted
8380     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
8381       Amt = 0;
8382     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
8383     MCInst TmpInst;
8384     TmpInst.setOpcode(Opc);
8385     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8386     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8387     if (Opc == ARM::MOVsi)
8388       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8389     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8390     TmpInst.addOperand(Inst.getOperand(4));
8391     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8392     Inst = TmpInst;
8393     return true;
8394   }
8395   case ARM::RRXi: {
8396     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8397     MCInst TmpInst;
8398     TmpInst.setOpcode(ARM::MOVsi);
8399     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8400     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8401     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8402     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8403     TmpInst.addOperand(Inst.getOperand(3));
8404     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8405     Inst = TmpInst;
8406     return true;
8407   }
8408   case ARM::t2LDMIA_UPD: {
8409     // If this is a load of a single register, then we should use
8410     // a post-indexed LDR instruction instead, per the ARM ARM.
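         // E.g. "ldmia r1!, {r2}" is emitted as "ldr r2, [r1], #4".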
8411     if (Inst.getNumOperands() != 5)
8412       return false;
8413     MCInst TmpInst;
8414     TmpInst.setOpcode(ARM::t2LDR_POST);
8415     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8416     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8417     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8418     TmpInst.addOperand(MCOperand::createImm(4));
8419     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8420     TmpInst.addOperand(Inst.getOperand(3));
8421     Inst = TmpInst;
8422     return true;
8423   }
8424   case ARM::t2STMDB_UPD: {
8425     // If this is a store of a single register, then we should use
8426     // a pre-indexed STR instruction instead, per the ARM ARM.
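         // E.g. "stmdb r1!, {r2}" is emitted as "str r2, [r1, #-4]!".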
8427     if (Inst.getNumOperands() != 5)
8428       return false;
8429     MCInst TmpInst;
8430     TmpInst.setOpcode(ARM::t2STR_PRE);
8431     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8432     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8433     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8434     TmpInst.addOperand(MCOperand::createImm(-4));
8435     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8436     TmpInst.addOperand(Inst.getOperand(3));
8437     Inst = TmpInst;
8438     return true;
8439   }
8440   case ARM::LDMIA_UPD:
8441     // If this is a load of a single register via a 'pop', then we should use
8442     // a post-indexed LDR instruction instead, per the ARM ARM.
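         // E.g. an ARM-mode "pop {r3}" is emitted as "ldr r3, [sp], #4".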
8443     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8444         Inst.getNumOperands() == 5) {
8445       MCInst TmpInst;
8446       TmpInst.setOpcode(ARM::LDR_POST_IMM);
8447       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8448       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8449       TmpInst.addOperand(Inst.getOperand(1)); // Rn
8450       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
8451       TmpInst.addOperand(MCOperand::createImm(4));
8452       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8453       TmpInst.addOperand(Inst.getOperand(3));
8454       Inst = TmpInst;
8455       return true;
8456     }
8457     break;
8458   case ARM::STMDB_UPD:
8459     // If this is a store of a single register via a 'push', then we should use
8460     // a pre-indexed STR instruction instead, per the ARM ARM.
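         // E.g. an ARM-mode "push {r3}" is emitted as "str r3, [sp, #-4]!".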
8461     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8462         Inst.getNumOperands() == 5) {
8463       MCInst TmpInst;
8464       TmpInst.setOpcode(ARM::STR_PRE_IMM);
8465       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8466       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8467       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8468       TmpInst.addOperand(MCOperand::createImm(-4));
8469       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8470       TmpInst.addOperand(Inst.getOperand(3));
8471       Inst = TmpInst;
8472     }
8473     break;
8474   case ARM::t2ADDri12:
8475     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8476     // mnemonic was used (not "addw"), encoding T3 is preferred.
8477     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8478         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8479       break;
8480     Inst.setOpcode(ARM::t2ADDri);
8481     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8482     break;
8483   case ARM::t2SUBri12:
8484     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8485     // mnemonic was used (not "subw"), encoding T3 is preferred.
8486     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8487         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8488       break;
8489     Inst.setOpcode(ARM::t2SUBri);
8490     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8491     break;
8492   case ARM::tADDi8:
8493     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8494     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8495     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8496     // to encoding T1 if <Rd> is omitted."
8497     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8498       Inst.setOpcode(ARM::tADDi3);
8499       return true;
8500     }
8501     break;
8502   case ARM::tSUBi8:
8503     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
8504     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8505     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8506     // to encoding T1 if <Rd> is omitted."
8507     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8508       Inst.setOpcode(ARM::tSUBi3);
8509       return true;
8510     }
8511     break;
8512   case ARM::t2ADDri:
8513   case ARM::t2SUBri: {
8514     // If the destination and first source operand are the same, and
8515     // the flags are compatible with the current IT status, use encoding T2
8516     // instead of T3. For compatibility with the system 'as'. Make sure the
8517     // wide encoding wasn't explicit.
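         // E.g. "adds r0, r0, #1" outside an IT block (or "add r0, r0, #1"
         // inside one) is emitted with the 16-bit tADDi8 encoding.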
8518     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8519         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8520         (unsigned)Inst.getOperand(2).getImm() > 255 ||
8521         ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
8522          (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
8523         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8524          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8525       break;
8526     MCInst TmpInst;
8527     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8528                       ARM::tADDi8 : ARM::tSUBi8);
8529     TmpInst.addOperand(Inst.getOperand(0));
8530     TmpInst.addOperand(Inst.getOperand(5));
8531     TmpInst.addOperand(Inst.getOperand(0));
8532     TmpInst.addOperand(Inst.getOperand(2));
8533     TmpInst.addOperand(Inst.getOperand(3));
8534     TmpInst.addOperand(Inst.getOperand(4));
8535     Inst = TmpInst;
8536     return true;
8537   }
8538   case ARM::t2ADDrr: {
8539     // If the destination and first source operand are the same, and
8540     // there's no setting of the flags, use encoding T2 instead of T3.
8541     // Note that this is only for ADD, not SUB. This mirrors the system
8542     // 'as' behaviour.  Also take advantage of ADD being commutative.
8543     // Make sure the wide encoding wasn't explicit.
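         // E.g. "add r0, r1, r0" has its sources commuted so it can use the
         // 16-bit tADDhirr encoding.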
8544     bool Swap = false;
8545     auto DestReg = Inst.getOperand(0).getReg();
8546     bool Transform = DestReg == Inst.getOperand(1).getReg();
8547     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
8548       Transform = true;
8549       Swap = true;
8550     }
8551     if (!Transform ||
8552         Inst.getOperand(5).getReg() != 0 ||
8553         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8554          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".w"))
8555       break;
8556     MCInst TmpInst;
8557     TmpInst.setOpcode(ARM::tADDhirr);
8558     TmpInst.addOperand(Inst.getOperand(0));
8559     TmpInst.addOperand(Inst.getOperand(0));
8560     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
8561     TmpInst.addOperand(Inst.getOperand(3));
8562     TmpInst.addOperand(Inst.getOperand(4));
8563     Inst = TmpInst;
8564     return true;
8565   }
8566   case ARM::tADDrSP: {
8567     // If the non-SP source operand and the destination operand are not the
8568     // same, we need to use the 32-bit encoding if it's available.
8569     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8570       Inst.setOpcode(ARM::t2ADDrr);
8571       Inst.addOperand(MCOperand::createReg(0)); // cc_out
8572       return true;
8573     }
8574     break;
8575   }
8576   case ARM::tB:
8577     // A Thumb conditional branch outside of an IT block is a tBcc.
8578     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8579       Inst.setOpcode(ARM::tBcc);
8580       return true;
8581     }
8582     break;
8583   case ARM::t2B:
8584     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8585     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
8586       Inst.setOpcode(ARM::t2Bcc);
8587       return true;
8588     }
8589     break;
8590   case ARM::t2Bcc:
8591     // If the conditional is AL or we're in an IT block, we really want t2B.
8592     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8593       Inst.setOpcode(ARM::t2B);
8594       return true;
8595     }
8596     break;
8597   case ARM::tBcc:
8598     // If the conditional is AL, we really want tB.
8599     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8600       Inst.setOpcode(ARM::tB);
8601       return true;
8602     }
8603     break;
8604   case ARM::tLDMIA: {
8605     // If the register list contains any high registers, or if the writeback
8606     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8607     // instead if we're in Thumb2. Otherwise, this should have generated
8608     // an error in validateInstruction().
8609     unsigned Rn = Inst.getOperand(0).getReg();
8610     bool hasWritebackToken =
8611         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8612          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8613     bool listContainsBase;
8614     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8615         (!listContainsBase && !hasWritebackToken) ||
8616         (listContainsBase && hasWritebackToken)) {
8617       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8618       assert (isThumbTwo());
8619       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8620       // If we're switching to the updating version, we need to insert
8621       // the writeback tied operand.
8622       if (hasWritebackToken)
8623         Inst.insert(Inst.begin(),
8624                     MCOperand::createReg(Inst.getOperand(0).getReg()));
8625       return true;
8626     }
8627     break;
8628   }
8629   case ARM::tSTMIA_UPD: {
8630     // If the register list contains any high registers, we need to use
8631     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8632     // should have generated an error in validateInstruction().
8633     unsigned Rn = Inst.getOperand(0).getReg();
8634     bool listContainsBase;
8635     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8636       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8637       assert (isThumbTwo());
8638       Inst.setOpcode(ARM::t2STMIA_UPD);
8639       return true;
8640     }
8641     break;
8642   }
8643   case ARM::tPOP: {
8644     bool listContainsBase;
8645     // If the register list contains any high registers, we need to use
8646     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8647     // should have generated an error in validateInstruction().
8648     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8649       return false;
8650     assert (isThumbTwo());
8651     Inst.setOpcode(ARM::t2LDMIA_UPD);
8652     // Add the base register and writeback operands.
8653     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8654     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8655     return true;
8656   }
8657   case ARM::tPUSH: {
8658     bool listContainsBase;
8659     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8660       return false;
8661     assert (isThumbTwo());
8662     Inst.setOpcode(ARM::t2STMDB_UPD);
8663     // Add the base register and writeback operands.
8664     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8665     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8666     return true;
8667   }
8668   case ARM::t2MOVi: {
8669     // If we can use the 16-bit encoding and the user didn't explicitly
8670     // request the 32-bit variant, transform it here.
8671     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8672         (unsigned)Inst.getOperand(1).getImm() <= 255 &&
8673         ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
8674           Inst.getOperand(4).getReg() == ARM::CPSR) ||
8675          (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
8676         (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8677          static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8678       // The operands aren't in the same order for tMOVi8...
8679       MCInst TmpInst;
8680       TmpInst.setOpcode(ARM::tMOVi8);
8681       TmpInst.addOperand(Inst.getOperand(0));
8682       TmpInst.addOperand(Inst.getOperand(4));
8683       TmpInst.addOperand(Inst.getOperand(1));
8684       TmpInst.addOperand(Inst.getOperand(2));
8685       TmpInst.addOperand(Inst.getOperand(3));
8686       Inst = TmpInst;
8687       return true;
8688     }
8689     break;
8690   }
8691   case ARM::t2MOVr: {
8692     // If we can use the 16-bit encoding and the user didn't explicitly
8693     // request the 32-bit variant, transform it here.
8694     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8695         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8696         Inst.getOperand(2).getImm() == ARMCC::AL &&
8697         Inst.getOperand(4).getReg() == ARM::CPSR &&
8698         (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8699          static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8700       // The operands aren't the same for tMOV[S]r... (no cc_out)
8701       MCInst TmpInst;
8702       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8703       TmpInst.addOperand(Inst.getOperand(0));
8704       TmpInst.addOperand(Inst.getOperand(1));
8705       TmpInst.addOperand(Inst.getOperand(2));
8706       TmpInst.addOperand(Inst.getOperand(3));
8707       Inst = TmpInst;
8708       return true;
8709     }
8710     break;
8711   }
8712   case ARM::t2SXTH:
8713   case ARM::t2SXTB:
8714   case ARM::t2UXTH:
8715   case ARM::t2UXTB: {
8716     // If we can use the 16-bit encoding and the user didn't explicitly
8717     // request the 32-bit variant, transform it here.
8718     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8719         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8720         Inst.getOperand(2).getImm() == 0 &&
8721         (!static_cast<ARMOperand &>(*Operands[2]).isToken() ||
8722          static_cast<ARMOperand &>(*Operands[2]).getToken() != ".w")) {
8723       unsigned NewOpc;
8724       switch (Inst.getOpcode()) {
8725       default: llvm_unreachable("Illegal opcode!");
8726       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8727       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8728       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8729       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8730       }
8731       // The operands aren't the same for Thumb1 (no rotate operand).
8732       MCInst TmpInst;
8733       TmpInst.setOpcode(NewOpc);
8734       TmpInst.addOperand(Inst.getOperand(0));
8735       TmpInst.addOperand(Inst.getOperand(1));
8736       TmpInst.addOperand(Inst.getOperand(3));
8737       TmpInst.addOperand(Inst.getOperand(4));
8738       Inst = TmpInst;
8739       return true;
8740     }
8741     break;
8742   }
8743   case ARM::MOVsi: {
8744     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8745     // rrx shifts and asr/lsr of #32 are encoded as 0; leave them alone here.
8746     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8747       return false;
8748     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8749       // Shifting by zero is accepted as a vanilla 'MOVr'
8750       MCInst TmpInst;
8751       TmpInst.setOpcode(ARM::MOVr);
8752       TmpInst.addOperand(Inst.getOperand(0));
8753       TmpInst.addOperand(Inst.getOperand(1));
8754       TmpInst.addOperand(Inst.getOperand(3));
8755       TmpInst.addOperand(Inst.getOperand(4));
8756       TmpInst.addOperand(Inst.getOperand(5));
8757       Inst = TmpInst;
8758       return true;
8759     }
8760     return false;
8761   }
8762   case ARM::ANDrsi:
8763   case ARM::ORRrsi:
8764   case ARM::EORrsi:
8765   case ARM::BICrsi:
8766   case ARM::SUBrsi:
8767   case ARM::ADDrsi: {
8768     unsigned newOpc;
8769     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8770     if (SOpc == ARM_AM::rrx) return false;
8771     switch (Inst.getOpcode()) {
8772     default: llvm_unreachable("unexpected opcode!");
8773     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8774     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8775     case ARM::EORrsi: newOpc = ARM::EORrr; break;
8776     case ARM::BICrsi: newOpc = ARM::BICrr; break;
8777     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
8778     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
8779     }
8780     // If the shift is by zero, use the non-shifted instruction definition.
8781     // The exception is for right shifts, where 0 == 32
8782     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
8783         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
8784       MCInst TmpInst;
8785       TmpInst.setOpcode(newOpc);
8786       TmpInst.addOperand(Inst.getOperand(0));
8787       TmpInst.addOperand(Inst.getOperand(1));
8788       TmpInst.addOperand(Inst.getOperand(2));
8789       TmpInst.addOperand(Inst.getOperand(4));
8790       TmpInst.addOperand(Inst.getOperand(5));
8791       TmpInst.addOperand(Inst.getOperand(6));
8792       Inst = TmpInst;
8793       return true;
8794     }
8795     return false;
8796   }
8797   case ARM::ITasm:
8798   case ARM::t2IT: {
8799     MCOperand &MO = Inst.getOperand(1);
8800     unsigned Mask = MO.getImm();
8801     ARMCC::CondCodes Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
8802 
8803     // Set up the IT block state according to the IT instruction we just
8804     // matched.
8805     assert(!inITBlock() && "nested IT blocks?!");
8806     startExplicitITBlock(Cond, Mask);
8807     MO.setImm(getITMaskEncoding());
8808     break;
8809   }
8810   case ARM::t2LSLrr:
8811   case ARM::t2LSRrr:
8812   case ARM::t2ASRrr:
8813   case ARM::t2SBCrr:
8814   case ARM::t2RORrr:
8815   case ARM::t2BICrr:
8816   {
8817     // Assemblers should use the narrow encodings of these instructions when permissible.
8818     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8819          isARMLowRegister(Inst.getOperand(2).getReg())) &&
8820         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8821         ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8822          (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8823         (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8824          !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8825              ".w"))) {
8826       unsigned NewOpc;
8827       switch (Inst.getOpcode()) {
8828         default: llvm_unreachable("unexpected opcode");
8829         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
8830         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
8831         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
8832         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
8833         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
8834         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
8835       }
8836       MCInst TmpInst;
8837       TmpInst.setOpcode(NewOpc);
8838       TmpInst.addOperand(Inst.getOperand(0));
8839       TmpInst.addOperand(Inst.getOperand(5));
8840       TmpInst.addOperand(Inst.getOperand(1));
8841       TmpInst.addOperand(Inst.getOperand(2));
8842       TmpInst.addOperand(Inst.getOperand(3));
8843       TmpInst.addOperand(Inst.getOperand(4));
8844       Inst = TmpInst;
8845       return true;
8846     }
8847     return false;
8848   }
8849   case ARM::t2ANDrr:
8850   case ARM::t2EORrr:
8851   case ARM::t2ADCrr:
8852   case ARM::t2ORRrr:
8853   {
8854     // Assemblers should use the narrow encodings of these instructions when permissible.
8855     // These instructions are special in that they are commutable, so shorter encodings
8856     // are available more often.
8857     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8858          isARMLowRegister(Inst.getOperand(2).getReg())) &&
8859         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
8860          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
8861         ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
8862          (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
8863         (!static_cast<ARMOperand &>(*Operands[3]).isToken() ||
8864          !static_cast<ARMOperand &>(*Operands[3]).getToken().equals_lower(
8865              ".w"))) {
8866       unsigned NewOpc;
8867       switch (Inst.getOpcode()) {
8868         default: llvm_unreachable("unexpected opcode");
8869         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
8870         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
8871         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
8872         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
8873       }
8874       MCInst TmpInst;
8875       TmpInst.setOpcode(NewOpc);
8876       TmpInst.addOperand(Inst.getOperand(0));
8877       TmpInst.addOperand(Inst.getOperand(5));
8878       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
8879         TmpInst.addOperand(Inst.getOperand(1));
8880         TmpInst.addOperand(Inst.getOperand(2));
8881       } else {
8882         TmpInst.addOperand(Inst.getOperand(2));
8883         TmpInst.addOperand(Inst.getOperand(1));
8884       }
8885       TmpInst.addOperand(Inst.getOperand(3));
8886       TmpInst.addOperand(Inst.getOperand(4));
8887       Inst = TmpInst;
8888       return true;
8889     }
8890     return false;
8891   }
8892   }
8893   return false;
8894 }
8895 
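     // Check context-sensitive constraints that the table-generated matcher
     // can't express: besides the IT-block rules for the optional cc_out
     // operand handled below, this also enforces a few architecture-version
     // requirements and the rGPR register class restrictions on SP and PC.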
8896 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8897   // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
8898   // suffix depending on whether they're in an IT block or not.
8899   unsigned Opc = Inst.getOpcode();
8900   const MCInstrDesc &MCID = MII.get(Opc);
8901   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8902     assert(MCID.hasOptionalDef() &&
8903            "optionally flag setting instruction missing optional def operand");
8904     assert(MCID.NumOperands == Inst.getNumOperands() &&
8905            "operand count mismatch!");
8906     // Find the optional-def operand (cc_out).
8907     unsigned OpNo;
8908     for (OpNo = 0;
8909          OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
8910          ++OpNo)
8911       ;
8912     // If we're parsing Thumb1, reject it completely.
8913     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8914       return Match_MnemonicFail;
8915     // If we're parsing Thumb2, which form is legal depends on whether we're
8916     // in an IT block.
8917     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8918         !inITBlock())
8919       return Match_RequiresITBlock;
8920     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8921         inITBlock())
8922       return Match_RequiresNotITBlock;
8923   } else if (isThumbOne()) {
8924     // Some Thumb1 encodings that support high registers only allow both
8925     // registers to be from r0-r7 when in Thumb2.
8926     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
8927         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8928         isARMLowRegister(Inst.getOperand(2).getReg()))
8929       return Match_RequiresThumb2;
8930     // Others only require ARMv6 or later.
8931     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
8932              isARMLowRegister(Inst.getOperand(0).getReg()) &&
8933              isARMLowRegister(Inst.getOperand(1).getReg()))
8934       return Match_RequiresV6;
8935   }
8936 
8937   for (unsigned I = 0; I < MCID.NumOperands; ++I)
8938     if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
8939       // rGPRRegClass excludes PC; before ARMv8 it also excluded SP.
8940       if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
8941         return Match_RequiresV8;
8942       else if (Inst.getOperand(I).getReg() == ARM::PC)
8943         return Match_InvalidOperand;
8944     }
8945 
8946   return Match_Success;
8947 }
8948 
8949 namespace llvm {
8950 template <> inline bool IsCPSRDead<MCInst>(MCInst *Instr) {
8951   return true; // In an assembly source, no need to second-guess
8952 }
8953 }
8954 
8955 // Returns true if Inst is unpredictable if it is in an IT block, but is not
8956 // the last instruction in the block.
8957 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
8958   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
8959 
8960   // All branch & call instructions terminate IT blocks.
8961   if (MCID.isTerminator() || MCID.isCall() || MCID.isReturn() ||
8962       MCID.isBranch() || MCID.isIndirectBranch())
8963     return true;
8964 
8965   // Any arithmetic instruction which writes to the PC also terminates the IT
8966   // block.
8967   for (unsigned OpIdx = 0; OpIdx < MCID.getNumDefs(); ++OpIdx) {
8968     MCOperand &Op = Inst.getOperand(OpIdx);
8969     if (Op.isReg() && Op.getReg() == ARM::PC)
8970       return true;
8971   }
8972 
8973   if (MCID.hasImplicitDefOfPhysReg(ARM::PC, MRI))
8974     return true;
8975 
8976   // Instructions with variable operand lists can also write the PC through
8977   // one of those operands. We only care about Thumb instructions here, as
8978   // ARM instructions obviously can't be in an IT block.
8979   switch (Inst.getOpcode()) {
8980   case ARM::t2LDMIA:
8981   case ARM::t2LDMIA_UPD:
8982   case ARM::t2LDMDB:
8983   case ARM::t2LDMDB_UPD:
8984     if (listContainsReg(Inst, 3, ARM::PC))
8985       return true;
8986     break;
8987   case ARM::tPOP:
8988     if (listContainsReg(Inst, 2, ARM::PC))
8989       return true;
8990     break;
8991   }
8992 
8993   return false;
8994 }
8995 
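     // Match an instruction, handling the implicit IT block state for Thumb2.
     // We try, in order: extending the current implicit IT block, matching
     // outside any IT block, and finally opening a fresh implicit IT block
     // whose condition is taken from the matched instruction. EmitInITBlock
     // tells the caller whether the result must be emitted as a pending
     // conditional instruction.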
8996 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
8997                                           uint64_t &ErrorInfo,
8998                                           bool MatchingInlineAsm,
8999                                           bool &EmitInITBlock,
9000                                           MCStreamer &Out) {
9001   // If we can't use an implicit IT block here, just match as normal.
9002   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
9003     return MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
9004 
9005   // Try to match the instruction in an extension of the current IT block (if
9006   // there is one).
9007   if (inImplicitITBlock()) {
9008     extendImplicitITBlock(ITState.Cond);
9009     if (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm) ==
9010             Match_Success) {
9011       // The match succeeded, but we still have to check that the instruction is
9012       // valid in this implicit IT block.
9013       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9014       if (MCID.isPredicable()) {
9015         ARMCC::CondCodes InstCond =
9016             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9017                 .getImm();
9018         ARMCC::CondCodes ITCond = currentITCond();
9019         if (InstCond == ITCond) {
9020           EmitInITBlock = true;
9021           return Match_Success;
9022         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
9023           invertCurrentITCondition();
9024           EmitInITBlock = true;
9025           return Match_Success;
9026         }
9027       }
9028     }
9029     rewindImplicitITPosition();
9030   }
9031 
9032   // Finish the current IT block, and try to match outside any IT block.
9033   flushPendingInstructions(Out);
9034   unsigned PlainMatchResult =
9035       MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm);
9036   if (PlainMatchResult == Match_Success) {
9037     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9038     if (MCID.isPredicable()) {
9039       ARMCC::CondCodes InstCond =
9040           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9041               .getImm();
9042       // Some forms of the branch instruction have their own condition code
9043       // fields, so can be conditionally executed without an IT block.
9044       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
9045         EmitInITBlock = false;
9046         return Match_Success;
9047       }
9048       if (InstCond == ARMCC::AL) {
9049         EmitInITBlock = false;
9050         return Match_Success;
9051       }
9052     } else {
9053       EmitInITBlock = false;
9054       return Match_Success;
9055     }
9056   }
9057 
9058   // Try to match in a new IT block. The matcher doesn't check the actual
9059   // condition, so we create an IT block with a dummy condition, and fix it up
9060   // once we know the actual condition.
9061   startImplicitITBlock();
9062   if (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm) ==
9063       Match_Success) {
9064     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9065     if (MCID.isPredicable()) {
9066       ITState.Cond =
9067           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9068               .getImm();
9069       EmitInITBlock = true;
9070       return Match_Success;
9071     }
9072   }
9073   discardImplicitITBlock();
9074 
9075   // If none of these succeed, return the error we got when trying to match
9076   // outside any IT blocks.
9077   EmitInITBlock = false;
9078   return PlainMatchResult;
9079 }
9080 
9081 static const char *getSubtargetFeatureName(uint64_t Val);
9082 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
9083                                            OperandVector &Operands,
9084                                            MCStreamer &Out, uint64_t &ErrorInfo,
9085                                            bool MatchingInlineAsm) {
9086   MCInst Inst;
9087   unsigned MatchResult;
9088   bool PendConditionalInstruction = false;
9089 
9090   MatchResult = MatchInstruction(Operands, Inst, ErrorInfo, MatchingInlineAsm,
9091                                  PendConditionalInstruction, Out);
9092 
9093   switch (MatchResult) {
9094   case Match_Success:
9095     // Context sensitive operand constraints aren't handled by the matcher,
9096     // so check them here.
9097     if (validateInstruction(Inst, Operands)) {
9098       // Still progress the IT block, otherwise one wrong condition causes
9099       // nasty cascading errors.
9100       forwardITPosition();
9101       return true;
9102     }
9103 
9104     { // processInstruction() updates the inITBlock state; save it away first.
9105       bool wasInITBlock = inITBlock();
9106 
9107       // Some instructions need post-processing to, for example, tweak which
9108       // encoding is selected. Loop on it while changes happen so the
9109       // individual transformations can chain off each other. E.g.,
9110       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
9111       while (processInstruction(Inst, Operands, Out))
9112         ;
9113 
9114       // Only after the instruction is fully processed, we can validate it
9115       if (wasInITBlock && hasV8Ops() && isThumb() &&
9116           !isV8EligibleForIT(&Inst)) {
9117         Warning(IDLoc, "deprecated instruction in IT block");
9118       }
9119     }
9120 
9121     // Only move forward at the very end so that everything in validate
9122     // and process gets a consistent answer about whether we're in an IT
9123     // block.
9124     forwardITPosition();
9125 
9126     // ITasm is an ARM mode pseudo-instruction that just sets the IT block and
9127     // doesn't actually encode.
9128     if (Inst.getOpcode() == ARM::ITasm)
9129       return false;
9130 
9131     Inst.setLoc(IDLoc);
9132     if (PendConditionalInstruction) {
9133       PendingConditionalInsts.push_back(Inst);
9134       if (isITBlockFull() || isITBlockTerminator(Inst))
9135         flushPendingInstructions(Out);
9136     } else {
9137       Out.EmitInstruction(Inst, getSTI());
9138     }
9139     return false;
9140   case Match_MissingFeature: {
9141     assert(ErrorInfo && "Unknown missing feature!");
9142     // Special case the error message for the very common case where only
9143     // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
9144     std::string Msg = "instruction requires:";
9145     uint64_t Mask = 1;
9146     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
9147       if (ErrorInfo & Mask) {
9148         Msg += " ";
9149         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
9150       }
9151       Mask <<= 1;
9152     }
9153     return Error(IDLoc, Msg);
9154   }
9155   case Match_InvalidOperand: {
9156     SMLoc ErrorLoc = IDLoc;
9157     if (ErrorInfo != ~0ULL) {
9158       if (ErrorInfo >= Operands.size())
9159         return Error(IDLoc, "too few operands for instruction");
9160 
9161       ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
9162       if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
9163     }
9164 
9165     return Error(ErrorLoc, "invalid operand for instruction");
9166   }
9167   case Match_MnemonicFail:
9168     return Error(IDLoc, "invalid instruction",
9169                  ((ARMOperand &)*Operands[0]).getLocRange());
9170   case Match_RequiresNotITBlock:
9171     return Error(IDLoc, "flag setting instruction only valid outside IT block");
9172   case Match_RequiresITBlock:
9173     return Error(IDLoc, "instruction only valid inside IT block");
9174   case Match_RequiresV6:
9175     return Error(IDLoc, "instruction variant requires ARMv6 or later");
9176   case Match_RequiresThumb2:
9177     return Error(IDLoc, "instruction variant requires Thumb2");
9178   case Match_RequiresV8:
9179     return Error(IDLoc, "instruction variant requires ARMv8 or later");
9180   case Match_ImmRange0_15: {
9181     SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
9182     if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
9183     return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
9184   }
9185   case Match_ImmRange0_239: {
9186     SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getStartLoc();
9187     if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
9188     return Error(ErrorLoc, "immediate operand must be in the range [0,239]");
9189   }
9190   case Match_AlignedMemoryRequiresNone:
9191   case Match_DupAlignedMemoryRequiresNone:
9192   case Match_AlignedMemoryRequires16:
9193   case Match_DupAlignedMemoryRequires16:
9194   case Match_AlignedMemoryRequires32:
9195   case Match_DupAlignedMemoryRequires32:
9196   case Match_AlignedMemoryRequires64:
9197   case Match_DupAlignedMemoryRequires64:
9198   case Match_AlignedMemoryRequires64or128:
9199   case Match_DupAlignedMemoryRequires64or128:
9200   case Match_AlignedMemoryRequires64or128or256:
9201   {
9202     SMLoc ErrorLoc = ((ARMOperand &)*Operands[ErrorInfo]).getAlignmentLoc();
9203     if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
9204     switch (MatchResult) {
9205       default:
9206         llvm_unreachable("Missing Match_Aligned type");
9207       case Match_AlignedMemoryRequiresNone:
9208       case Match_DupAlignedMemoryRequiresNone:
9209         return Error(ErrorLoc, "alignment must be omitted");
9210       case Match_AlignedMemoryRequires16:
9211       case Match_DupAlignedMemoryRequires16:
9212         return Error(ErrorLoc, "alignment must be 16 or omitted");
9213       case Match_AlignedMemoryRequires32:
9214       case Match_DupAlignedMemoryRequires32:
9215         return Error(ErrorLoc, "alignment must be 32 or omitted");
9216       case Match_AlignedMemoryRequires64:
9217       case Match_DupAlignedMemoryRequires64:
9218         return Error(ErrorLoc, "alignment must be 64 or omitted");
9219       case Match_AlignedMemoryRequires64or128:
9220       case Match_DupAlignedMemoryRequires64or128:
9221         return Error(ErrorLoc, "alignment must be 64, 128 or omitted");
9222       case Match_AlignedMemoryRequires64or128or256:
9223         return Error(ErrorLoc, "alignment must be 64, 128, 256 or omitted");
9224     }
9225   }
9226   }
9227 
9228   llvm_unreachable("Implement any new match types added!");
9229 }
9230 
/// ParseDirective parses the ARM-specific directives.
9232 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
9233   const MCObjectFileInfo::Environment Format =
9234     getContext().getObjectFileInfo()->getObjectFileType();
9235   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9236   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
9237 
9238   StringRef IDVal = DirectiveID.getIdentifier();
9239   if (IDVal == ".word")
9240     return parseLiteralValues(4, DirectiveID.getLoc());
9241   else if (IDVal == ".short" || IDVal == ".hword")
9242     return parseLiteralValues(2, DirectiveID.getLoc());
9243   else if (IDVal == ".thumb")
9244     return parseDirectiveThumb(DirectiveID.getLoc());
9245   else if (IDVal == ".arm")
9246     return parseDirectiveARM(DirectiveID.getLoc());
9247   else if (IDVal == ".thumb_func")
9248     return parseDirectiveThumbFunc(DirectiveID.getLoc());
9249   else if (IDVal == ".code")
9250     return parseDirectiveCode(DirectiveID.getLoc());
9251   else if (IDVal == ".syntax")
9252     return parseDirectiveSyntax(DirectiveID.getLoc());
9253   else if (IDVal == ".unreq")
9254     return parseDirectiveUnreq(DirectiveID.getLoc());
9255   else if (IDVal == ".fnend")
9256     return parseDirectiveFnEnd(DirectiveID.getLoc());
9257   else if (IDVal == ".cantunwind")
9258     return parseDirectiveCantUnwind(DirectiveID.getLoc());
9259   else if (IDVal == ".personality")
9260     return parseDirectivePersonality(DirectiveID.getLoc());
9261   else if (IDVal == ".handlerdata")
9262     return parseDirectiveHandlerData(DirectiveID.getLoc());
9263   else if (IDVal == ".setfp")
9264     return parseDirectiveSetFP(DirectiveID.getLoc());
9265   else if (IDVal == ".pad")
9266     return parseDirectivePad(DirectiveID.getLoc());
9267   else if (IDVal == ".save")
9268     return parseDirectiveRegSave(DirectiveID.getLoc(), false);
9269   else if (IDVal == ".vsave")
9270     return parseDirectiveRegSave(DirectiveID.getLoc(), true);
9271   else if (IDVal == ".ltorg" || IDVal == ".pool")
9272     return parseDirectiveLtorg(DirectiveID.getLoc());
9273   else if (IDVal == ".even")
9274     return parseDirectiveEven(DirectiveID.getLoc());
9275   else if (IDVal == ".personalityindex")
9276     return parseDirectivePersonalityIndex(DirectiveID.getLoc());
9277   else if (IDVal == ".unwind_raw")
9278     return parseDirectiveUnwindRaw(DirectiveID.getLoc());
9279   else if (IDVal == ".movsp")
9280     return parseDirectiveMovSP(DirectiveID.getLoc());
9281   else if (IDVal == ".arch_extension")
9282     return parseDirectiveArchExtension(DirectiveID.getLoc());
9283   else if (IDVal == ".align")
9284     return parseDirectiveAlign(DirectiveID.getLoc());
9285   else if (IDVal == ".thumb_set")
9286     return parseDirectiveThumbSet(DirectiveID.getLoc());
9287 
9288   if (!IsMachO && !IsCOFF) {
9289     if (IDVal == ".arch")
9290       return parseDirectiveArch(DirectiveID.getLoc());
9291     else if (IDVal == ".cpu")
9292       return parseDirectiveCPU(DirectiveID.getLoc());
9293     else if (IDVal == ".eabi_attribute")
9294       return parseDirectiveEabiAttr(DirectiveID.getLoc());
9295     else if (IDVal == ".fpu")
9296       return parseDirectiveFPU(DirectiveID.getLoc());
9297     else if (IDVal == ".fnstart")
9298       return parseDirectiveFnStart(DirectiveID.getLoc());
9299     else if (IDVal == ".inst")
9300       return parseDirectiveInst(DirectiveID.getLoc());
9301     else if (IDVal == ".inst.n")
9302       return parseDirectiveInst(DirectiveID.getLoc(), 'n');
9303     else if (IDVal == ".inst.w")
9304       return parseDirectiveInst(DirectiveID.getLoc(), 'w');
9305     else if (IDVal == ".object_arch")
9306       return parseDirectiveObjectArch(DirectiveID.getLoc());
9307     else if (IDVal == ".tlsdescseq")
9308       return parseDirectiveTLSDescSeq(DirectiveID.getLoc());
9309   }
9310 
9311   return true;
9312 }
9313 
9314 /// parseLiteralValues
9315 ///  ::= .hword expression [, expression]*
9316 ///  ::= .short expression [, expression]*
9317 ///  ::= .word expression [, expression]*
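///
/// For example (illustrative input), ".word 0x12345678, sym" emits two
/// 4-byte values and ".short 42" emits a single 2-byte value.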
9318 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
9319   MCAsmParser &Parser = getParser();
9320   if (getLexer().isNot(AsmToken::EndOfStatement)) {
9321     for (;;) {
9322       const MCExpr *Value;
9323       if (getParser().parseExpression(Value)) {
9324         return false;
9325       }
9326 
9327       getParser().getStreamer().EmitValue(Value, Size, L);
9328 
9329       if (getLexer().is(AsmToken::EndOfStatement))
9330         break;
9331 
9332       // FIXME: Improve diagnostic.
9333       if (getLexer().isNot(AsmToken::Comma)) {
9334         Error(L, "unexpected token in directive");
9335         return false;
9336       }
9337       Parser.Lex();
9338     }
9339   }
9340 
9341   Parser.Lex();
9342   return false;
9343 }
9344 
9345 /// parseDirectiveThumb
9346 ///  ::= .thumb
9347 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
9348   MCAsmParser &Parser = getParser();
9349   if (getLexer().isNot(AsmToken::EndOfStatement)) {
9350     Error(L, "unexpected token in directive");
9351     return false;
9352   }
9353   Parser.Lex();
9354 
9355   if (!hasThumb()) {
9356     Error(L, "target does not support Thumb mode");
9357     return false;
9358   }
9359 
9360   if (!isThumb())
9361     SwitchMode();
9362 
9363   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9364   return false;
9365 }
9366 
9367 /// parseDirectiveARM
9368 ///  ::= .arm
9369 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
9370   MCAsmParser &Parser = getParser();
9371   if (getLexer().isNot(AsmToken::EndOfStatement)) {
9372     Error(L, "unexpected token in directive");
9373     return false;
9374   }
9375   Parser.Lex();
9376 
9377   if (!hasARM()) {
9378     Error(L, "target does not support ARM mode");
9379     return false;
9380   }
9381 
9382   if (isThumb())
9383     SwitchMode();
9384 
9385   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9386   return false;
9387 }
9388 
9389 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
9390   // We need to flush the current implicit IT block on a label, because it is
9391   // not legal to branch into an IT block.
9392   flushPendingInstructions(getStreamer());
9393   if (NextSymbolIsThumb) {
9394     getParser().getStreamer().EmitThumbFunc(Symbol);
9395     NextSymbolIsThumb = false;
9396   }
9397 }
9398 
9399 /// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
9401 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
9402   MCAsmParser &Parser = getParser();
9403   const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
9404   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9405 
  // Darwin asm syntax allows an optional function name after the .thumb_func
  // directive; ELF does not.
9408   if (IsMachO) {
9409     const AsmToken &Tok = Parser.getTok();
9410     if (Tok.isNot(AsmToken::EndOfStatement)) {
9411       if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) {
9412         Error(L, "unexpected token in .thumb_func directive");
9413         return false;
9414       }
9415 
9416       MCSymbol *Func =
9417           getParser().getContext().getOrCreateSymbol(Tok.getIdentifier());
9418       getParser().getStreamer().EmitThumbFunc(Func);
9419       Parser.Lex(); // Consume the identifier token.
9420       return false;
9421     }
9422   }
9423 
9424   if (getLexer().isNot(AsmToken::EndOfStatement)) {
9425     Error(Parser.getTok().getLoc(), "unexpected token in directive");
9426     return false;
9427   }
9428 
9429   NextSymbolIsThumb = true;
9430   return false;
9431 }
9432 
9433 /// parseDirectiveSyntax
9434 ///  ::= .syntax unified | divided
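///
/// For example, ".syntax unified" selects unified ARM/Thumb syntax;
/// ".syntax divided" is parsed but rejected as unsupported.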
9435 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
9436   MCAsmParser &Parser = getParser();
9437   const AsmToken &Tok = Parser.getTok();
9438   if (Tok.isNot(AsmToken::Identifier)) {
9439     Error(L, "unexpected token in .syntax directive");
9440     return false;
9441   }
9442 
9443   StringRef Mode = Tok.getString();
9444   if (Mode == "unified" || Mode == "UNIFIED") {
9445     Parser.Lex();
9446   } else if (Mode == "divided" || Mode == "DIVIDED") {
    Error(L, "'.syntax divided' arm assembly not supported");
9448     return false;
9449   } else {
9450     Error(L, "unrecognized syntax mode in .syntax directive");
9451     return false;
9452   }
9453 
9454   if (getLexer().isNot(AsmToken::EndOfStatement)) {
9455     Error(Parser.getTok().getLoc(), "unexpected token in directive");
9456     return false;
9457   }
9458   Parser.Lex();
9459 
9460   // TODO tell the MC streamer the mode
9461   // getParser().getStreamer().Emit???();
9462   return false;
9463 }
9464 
9465 /// parseDirectiveCode
9466 ///  ::= .code 16 | 32
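///
/// For example, ".code 16" switches the assembler to Thumb mode and
/// ".code 32" switches it to ARM mode, if the target supports that mode.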
9467 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
9468   MCAsmParser &Parser = getParser();
9469   const AsmToken &Tok = Parser.getTok();
9470   if (Tok.isNot(AsmToken::Integer)) {
9471     Error(L, "unexpected token in .code directive");
9472     return false;
9473   }
9474   int64_t Val = Parser.getTok().getIntVal();
9475   if (Val != 16 && Val != 32) {
9476     Error(L, "invalid operand to .code directive");
9477     return false;
9478   }
9479   Parser.Lex();
9480 
9481   if (getLexer().isNot(AsmToken::EndOfStatement)) {
9482     Error(Parser.getTok().getLoc(), "unexpected token in directive");
9483     return false;
9484   }
9485   Parser.Lex();
9486 
9487   if (Val == 16) {
9488     if (!hasThumb()) {
9489       Error(L, "target does not support Thumb mode");
9490       return false;
9491     }
9492 
9493     if (!isThumb())
9494       SwitchMode();
9495     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9496   } else {
9497     if (!hasARM()) {
9498       Error(L, "target does not support ARM mode");
9499       return false;
9500     }
9501 
9502     if (isThumb())
9503       SwitchMode();
9504     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9505   }
9506 
9507   return false;
9508 }
9509 
9510 /// parseDirectiveReq
9511 ///  ::= name .req registername
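///
/// For example (illustrative alias), "fpreg .req r11" lets later operands
/// use "fpreg" in place of r11 until the alias is removed with .unreq.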
9512 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
9513   MCAsmParser &Parser = getParser();
9514   Parser.Lex(); // Eat the '.req' token.
9515   unsigned Reg;
9516   SMLoc SRegLoc, ERegLoc;
9517   if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
9518     Error(SRegLoc, "register name expected");
9519     return false;
9520   }
9521 
9522   // Shouldn't be anything else.
9523   if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
9524     Error(Parser.getTok().getLoc(), "unexpected input in .req directive.");
9525     return false;
9526   }
9527 
9528   Parser.Lex(); // Consume the EndOfStatement
9529 
9530   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) {
9531     Error(SRegLoc, "redefinition of '" + Name + "' does not match original.");
9532     return false;
9533   }
9534 
9535   return false;
9536 }
9537 
/// parseDirectiveUnreq
9539 ///  ::= .unreq registername
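///
/// For example, ".unreq fpreg" removes an alias previously defined with
/// .req (illustrative name).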
9540 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9541   MCAsmParser &Parser = getParser();
9542   if (Parser.getTok().isNot(AsmToken::Identifier)) {
9543     Error(L, "unexpected input in .unreq directive.");
9544     return false;
9545   }
9546   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9547   Parser.Lex(); // Eat the identifier.
9548   return false;
9549 }
9550 
9551 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
9552 // before, if supported by the new target, or emit mapping symbols for the mode
9553 // switch.
9554 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
9555   if (WasThumb != isThumb()) {
9556     if (WasThumb && hasThumb()) {
9557       // Stay in Thumb mode
9558       SwitchMode();
9559     } else if (!WasThumb && hasARM()) {
9560       // Stay in ARM mode
9561       SwitchMode();
9562     } else {
9563       // Mode switch forced, because the new arch doesn't support the old mode.
9564       getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
9565                                                             : MCAF_Code32);
      // Warn about the implicit mode switch. GAS does not switch modes here,
9567       // but instead stays in the old mode, reporting an error on any following
9568       // instructions as the mode does not exist on the target.
9569       Warning(Loc, Twine("new target does not support ") +
9570                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
9571                        (!WasThumb ? "thumb" : "arm") + " mode");
9572     }
9573   }
9574 }
9575 
9576 /// parseDirectiveArch
9577 ///  ::= .arch token
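///
/// For example (illustrative), ".arch armv7-a" switches the available
/// feature set to that architecture for the following code.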
9578 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9579   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9580 
9581   unsigned ID = ARM::parseArch(Arch);
9582 
9583   if (ID == ARM::AK_INVALID) {
9584     Error(L, "Unknown arch name");
9585     return false;
9586   }
9587 
9588   bool WasThumb = isThumb();
9590   MCSubtargetInfo &STI = copySTI();
9591   STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
9592   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9593   FixModeAfterArchChange(WasThumb, L);
9594 
9595   getTargetStreamer().emitArch(ID);
9596   return false;
9597 }
9598 
9599 /// parseDirectiveEabiAttr
9600 ///  ::= .eabi_attribute int, int [, "str"]
9601 ///  ::= .eabi_attribute Tag_name, int [, "str"]
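///
/// For example (illustrative values):
///   .eabi_attribute Tag_CPU_name, "cortex-a9"   @ string-valued tag
///   .eabi_attribute 6, 10                       @ integer-valued tag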
9602 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
9603   MCAsmParser &Parser = getParser();
9604   int64_t Tag;
9605   SMLoc TagLoc;
9606   TagLoc = Parser.getTok().getLoc();
9607   if (Parser.getTok().is(AsmToken::Identifier)) {
9608     StringRef Name = Parser.getTok().getIdentifier();
9609     Tag = ARMBuildAttrs::AttrTypeFromString(Name);
9610     if (Tag == -1) {
9611       Error(TagLoc, "attribute name not recognised: " + Name);
9612       return false;
9613     }
9614     Parser.Lex();
9615   } else {
9616     const MCExpr *AttrExpr;
9617 
9618     TagLoc = Parser.getTok().getLoc();
9619     if (Parser.parseExpression(AttrExpr)) {
9620       return false;
9621     }
9622 
9623     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
9624     if (!CE) {
9625       Error(TagLoc, "expected numeric constant");
9626       return false;
9627     }
9628 
9629     Tag = CE->getValue();
9630   }
9631 
9632   if (Parser.getTok().isNot(AsmToken::Comma)) {
9633     Error(Parser.getTok().getLoc(), "comma expected");
9634     return false;
9635   }
9636   Parser.Lex(); // skip comma
9637 
9638   StringRef StringValue = "";
9639   bool IsStringValue = false;
9640 
9641   int64_t IntegerValue = 0;
9642   bool IsIntegerValue = false;
9643 
9644   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
9645     IsStringValue = true;
9646   else if (Tag == ARMBuildAttrs::compatibility) {
9647     IsStringValue = true;
9648     IsIntegerValue = true;
9649   } else if (Tag < 32 || Tag % 2 == 0)
9650     IsIntegerValue = true;
9651   else if (Tag % 2 == 1)
9652     IsStringValue = true;
9653   else
9654     llvm_unreachable("invalid tag type");
9655 
9656   if (IsIntegerValue) {
9657     const MCExpr *ValueExpr;
9658     SMLoc ValueExprLoc = Parser.getTok().getLoc();
9659     if (Parser.parseExpression(ValueExpr)) {
9660       return false;
9661     }
9662 
9663     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
9664     if (!CE) {
9665       Error(ValueExprLoc, "expected numeric constant");
9666       return false;
9667     }
9668 
9669     IntegerValue = CE->getValue();
9670   }
9671 
  if (Tag == ARMBuildAttrs::compatibility) {
    if (Parser.getTok().isNot(AsmToken::Comma)) {
      IsStringValue = false;
      Error(Parser.getTok().getLoc(), "comma expected");
      return false;
    }
    Parser.Lex();
  }
9682 
9683   if (IsStringValue) {
9684     if (Parser.getTok().isNot(AsmToken::String)) {
9685       Error(Parser.getTok().getLoc(), "bad string constant");
9686       return false;
9687     }
9688 
9689     StringValue = Parser.getTok().getStringContents();
9690     Parser.Lex();
9691   }
9692 
9693   if (IsIntegerValue && IsStringValue) {
9694     assert(Tag == ARMBuildAttrs::compatibility);
9695     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
9696   } else if (IsIntegerValue)
9697     getTargetStreamer().emitAttribute(Tag, IntegerValue);
9698   else if (IsStringValue)
9699     getTargetStreamer().emitTextAttribute(Tag, StringValue);
9700   return false;
9701 }
9702 
9703 /// parseDirectiveCPU
9704 ///  ::= .cpu str
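///
/// For example (illustrative), ".cpu cortex-a8" selects that CPU's default
/// feature set for subsequent instructions.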
9705 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9706   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9707   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9708 
9709   // FIXME: This is using table-gen data, but should be moved to
9710   // ARMTargetParser once that is table-gen'd.
9711   if (!getSTI().isCPUStringValid(CPU)) {
9712     Error(L, "Unknown CPU name");
9713     return false;
9714   }
9715 
9716   bool WasThumb = isThumb();
9717   MCSubtargetInfo &STI = copySTI();
9718   STI.setDefaultFeatures(CPU, "");
9719   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9720   FixModeAfterArchChange(WasThumb, L);
9721 
9722   return false;
9723 }
9724 /// parseDirectiveFPU
9725 ///  ::= .fpu str
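///
/// For example (illustrative), ".fpu vfpv3" or ".fpu neon" enables the
/// features implied by the named FPU.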
9726 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9727   SMLoc FPUNameLoc = getTok().getLoc();
9728   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9729 
9730   unsigned ID = ARM::parseFPU(FPU);
9731   std::vector<StringRef> Features;
9732   if (!ARM::getFPUFeatures(ID, Features)) {
9733     Error(FPUNameLoc, "Unknown FPU name");
9734     return false;
9735   }
9736 
9737   MCSubtargetInfo &STI = copySTI();
9738   for (auto Feature : Features)
9739     STI.ApplyFeatureFlag(Feature);
9740   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9741 
9742   getTargetStreamer().emitFPU(ID);
9743   return false;
9744 }
9745 
9746 /// parseDirectiveFnStart
9747 ///  ::= .fnstart
9748 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9749   if (UC.hasFnStart()) {
    Error(L, ".fnstart starts before the end of the previous one");
9751     UC.emitFnStartLocNotes();
9752     return false;
9753   }
9754 
9755   // Reset the unwind directives parser state
9756   UC.reset();
9757 
9758   getTargetStreamer().emitFnStart();
9759 
9760   UC.recordFnStart(L);
9761   return false;
9762 }
9763 
9764 /// parseDirectiveFnEnd
9765 ///  ::= .fnend
9766 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9767   // Check the ordering of unwind directives
9768   if (!UC.hasFnStart()) {
9769     Error(L, ".fnstart must precede .fnend directive");
9770     return false;
9771   }
9772 
9773   // Reset the unwind directives parser state
9774   getTargetStreamer().emitFnEnd();
9775 
9776   UC.reset();
9777   return false;
9778 }
9779 
9780 /// parseDirectiveCantUnwind
9781 ///  ::= .cantunwind
9782 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
9783   UC.recordCantUnwind(L);
9784 
9785   // Check the ordering of unwind directives
9786   if (!UC.hasFnStart()) {
9787     Error(L, ".fnstart must precede .cantunwind directive");
9788     return false;
9789   }
9790   if (UC.hasHandlerData()) {
9791     Error(L, ".cantunwind can't be used with .handlerdata directive");
9792     UC.emitHandlerDataLocNotes();
9793     return false;
9794   }
9795   if (UC.hasPersonality()) {
9796     Error(L, ".cantunwind can't be used with .personality directive");
9797     UC.emitPersonalityLocNotes();
9798     return false;
9799   }
9800 
9801   getTargetStreamer().emitCantUnwind();
9802   return false;
9803 }
9804 
9805 /// parseDirectivePersonality
9806 ///  ::= .personality name
9807 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
9808   MCAsmParser &Parser = getParser();
9809   bool HasExistingPersonality = UC.hasPersonality();
9810 
9811   UC.recordPersonality(L);
9812 
9813   // Check the ordering of unwind directives
9814   if (!UC.hasFnStart()) {
9815     Error(L, ".fnstart must precede .personality directive");
9816     return false;
9817   }
9818   if (UC.cantUnwind()) {
9819     Error(L, ".personality can't be used with .cantunwind directive");
9820     UC.emitCantUnwindLocNotes();
9821     return false;
9822   }
9823   if (UC.hasHandlerData()) {
9824     Error(L, ".personality must precede .handlerdata directive");
9825     UC.emitHandlerDataLocNotes();
9826     return false;
9827   }
9828   if (HasExistingPersonality) {
9829     Error(L, "multiple personality directives");
9830     UC.emitPersonalityLocNotes();
9831     return false;
9832   }
9833 
9834   // Parse the name of the personality routine
9835   if (Parser.getTok().isNot(AsmToken::Identifier)) {
9836     Error(L, "unexpected input in .personality directive.");
9837     return false;
9838   }
9839   StringRef Name(Parser.getTok().getIdentifier());
9840   Parser.Lex();
9841 
9842   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
9843   getTargetStreamer().emitPersonality(PR);
9844   return false;
9845 }
9846 
9847 /// parseDirectiveHandlerData
9848 ///  ::= .handlerdata
9849 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9850   UC.recordHandlerData(L);
9851 
9852   // Check the ordering of unwind directives
9853   if (!UC.hasFnStart()) {
    Error(L, ".fnstart must precede .handlerdata directive");
9855     return false;
9856   }
9857   if (UC.cantUnwind()) {
9858     Error(L, ".handlerdata can't be used with .cantunwind directive");
9859     UC.emitCantUnwindLocNotes();
9860     return false;
9861   }
9862 
9863   getTargetStreamer().emitHandlerData();
9864   return false;
9865 }
9866 
9867 /// parseDirectiveSetFP
9868 ///  ::= .setfp fpreg, spreg [, offset]
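///
/// For example (illustrative), ".setfp fp, sp, #4" records for the unwinder
/// that the frame pointer was set up as sp plus 4.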
9869 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
9870   MCAsmParser &Parser = getParser();
9871   // Check the ordering of unwind directives
9872   if (!UC.hasFnStart()) {
9873     Error(L, ".fnstart must precede .setfp directive");
9874     return false;
9875   }
9876   if (UC.hasHandlerData()) {
9877     Error(L, ".setfp must precede .handlerdata directive");
9878     return false;
9879   }
9880 
9881   // Parse fpreg
9882   SMLoc FPRegLoc = Parser.getTok().getLoc();
9883   int FPReg = tryParseRegister();
9884   if (FPReg == -1) {
9885     Error(FPRegLoc, "frame pointer register expected");
9886     return false;
9887   }
9888 
9889   // Consume comma
9890   if (Parser.getTok().isNot(AsmToken::Comma)) {
9891     Error(Parser.getTok().getLoc(), "comma expected");
9892     return false;
9893   }
9894   Parser.Lex(); // skip comma
9895 
9896   // Parse spreg
9897   SMLoc SPRegLoc = Parser.getTok().getLoc();
9898   int SPReg = tryParseRegister();
9899   if (SPReg == -1) {
9900     Error(SPRegLoc, "stack pointer register expected");
9901     return false;
9902   }
9903 
9904   if (SPReg != ARM::SP && SPReg != UC.getFPReg()) {
9905     Error(SPRegLoc, "register should be either $sp or the latest fp register");
9906     return false;
9907   }
9908 
9909   // Update the frame pointer register
9910   UC.saveFPReg(FPReg);
9911 
9912   // Parse offset
9913   int64_t Offset = 0;
9914   if (Parser.getTok().is(AsmToken::Comma)) {
9915     Parser.Lex(); // skip comma
9916 
9917     if (Parser.getTok().isNot(AsmToken::Hash) &&
9918         Parser.getTok().isNot(AsmToken::Dollar)) {
9919       Error(Parser.getTok().getLoc(), "'#' expected");
9920       return false;
9921     }
9922     Parser.Lex(); // skip hash token.
9923 
9924     const MCExpr *OffsetExpr;
9925     SMLoc ExLoc = Parser.getTok().getLoc();
9926     SMLoc EndLoc;
9927     if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9928       Error(ExLoc, "malformed setfp offset");
9929       return false;
9930     }
9931     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9932     if (!CE) {
9933       Error(ExLoc, "setfp offset must be an immediate");
9934       return false;
9935     }
9936 
9937     Offset = CE->getValue();
9938   }
9939 
9940   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
9941                                 static_cast<unsigned>(SPReg), Offset);
9942   return false;
9943 }
9944 
/// parseDirectivePad
9946 ///  ::= .pad offset
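///
/// For example (illustrative), ".pad #16" records that the prologue
/// adjusted the stack pointer by 16 bytes.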
9947 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
9948   MCAsmParser &Parser = getParser();
9949   // Check the ordering of unwind directives
9950   if (!UC.hasFnStart()) {
9951     Error(L, ".fnstart must precede .pad directive");
9952     return false;
9953   }
9954   if (UC.hasHandlerData()) {
9955     Error(L, ".pad must precede .handlerdata directive");
9956     return false;
9957   }
9958 
9959   // Parse the offset
9960   if (Parser.getTok().isNot(AsmToken::Hash) &&
9961       Parser.getTok().isNot(AsmToken::Dollar)) {
9962     Error(Parser.getTok().getLoc(), "'#' expected");
9963     return false;
9964   }
9965   Parser.Lex(); // skip hash token.
9966 
9967   const MCExpr *OffsetExpr;
9968   SMLoc ExLoc = Parser.getTok().getLoc();
9969   SMLoc EndLoc;
9970   if (getParser().parseExpression(OffsetExpr, EndLoc)) {
9971     Error(ExLoc, "malformed pad offset");
9972     return false;
9973   }
9974   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9975   if (!CE) {
9976     Error(ExLoc, "pad offset must be an immediate");
9977     return false;
9978   }
9979 
9980   getTargetStreamer().emitPad(CE->getValue());
9981   return false;
9982 }
9983 
9984 /// parseDirectiveRegSave
9985 ///  ::= .save  { registers }
9986 ///  ::= .vsave { registers }
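///
/// For example (illustrative register lists), ".save {r4-r7, lr}" records
/// saved core registers and ".vsave {d8-d11}" records saved D registers.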
9987 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
9988   // Check the ordering of unwind directives
9989   if (!UC.hasFnStart()) {
9990     Error(L, ".fnstart must precede .save or .vsave directives");
9991     return false;
9992   }
9993   if (UC.hasHandlerData()) {
9994     Error(L, ".save or .vsave must precede .handlerdata directive");
9995     return false;
9996   }
9997 
9998   // RAII object to make sure parsed operands are deleted.
9999   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
10000 
10001   // Parse the register list
10002   if (parseRegisterList(Operands))
10003     return false;
10004   ARMOperand &Op = (ARMOperand &)*Operands[0];
10005   if (!IsVector && !Op.isRegList()) {
10006     Error(L, ".save expects GPR registers");
10007     return false;
10008   }
10009   if (IsVector && !Op.isDPRRegList()) {
10010     Error(L, ".vsave expects DPR registers");
10011     return false;
10012   }
10013 
10014   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
10015   return false;
10016 }
10017 
10018 /// parseDirectiveInst
10019 ///  ::= .inst opcode [, ...]
10020 ///  ::= .inst.n opcode [, ...]
10021 ///  ::= .inst.w opcode [, ...]
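///
/// For example (illustrative encodings), ".inst 0xe1a00000" emits a 4-byte
/// ARM encoding and ".inst.n 0xbf00" emits a 2-byte Thumb encoding.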
10022 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
10023   MCAsmParser &Parser = getParser();
10024   int Width;
10025 
10026   if (isThumb()) {
10027     switch (Suffix) {
10028     case 'n':
10029       Width = 2;
10030       break;
10031     case 'w':
10032       Width = 4;
10033       break;
10034     default:
10035       Error(Loc, "cannot determine Thumb instruction size, "
10036                  "use inst.n/inst.w instead");
10037       return false;
10038     }
10039   } else {
10040     if (Suffix) {
10041       Error(Loc, "width suffixes are invalid in ARM mode");
10042       return false;
10043     }
10044     Width = 4;
10045   }
10046 
10047   if (getLexer().is(AsmToken::EndOfStatement)) {
10048     Error(Loc, "expected expression following directive");
10049     return false;
10050   }
10051 
10052   for (;;) {
10053     const MCExpr *Expr;
10054 
10055     if (getParser().parseExpression(Expr)) {
10056       Error(Loc, "expected expression");
10057       return false;
10058     }
10059 
10060     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
10061     if (!Value) {
10062       Error(Loc, "expected constant expression");
10063       return false;
10064     }
10065 
10066     switch (Width) {
10067     case 2:
10068       if (Value->getValue() > 0xffff) {
10069         Error(Loc, "inst.n operand is too big, use inst.w instead");
10070         return false;
10071       }
10072       break;
10073     case 4:
10074       if (Value->getValue() > 0xffffffff) {
10075         Error(Loc,
10076               StringRef(Suffix ? "inst.w" : "inst") + " operand is too big");
10077         return false;
10078       }
10079       break;
10080     default:
10081       llvm_unreachable("only supported widths are 2 and 4");
10082     }
10083 
10084     getTargetStreamer().emitInst(Value->getValue(), Suffix);
10085 
10086     if (getLexer().is(AsmToken::EndOfStatement))
10087       break;
10088 
10089     if (getLexer().isNot(AsmToken::Comma)) {
10090       Error(Loc, "unexpected token in directive");
10091       return false;
10092     }
10093 
10094     Parser.Lex();
10095   }
10096 
10097   Parser.Lex();
10098   return false;
10099 }
10100 
10101 /// parseDirectiveLtorg
10102 ///  ::= .ltorg | .pool
10103 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
10104   getTargetStreamer().emitCurrentConstantPool();
10105   return false;
10106 }
10107 
10108 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
10109   const MCSection *Section = getStreamer().getCurrentSectionOnly();
10110 
10111   if (getLexer().isNot(AsmToken::EndOfStatement)) {
10112     TokError("unexpected token in directive");
10113     return false;
10114   }
10115 
10116   if (!Section) {
10117     getStreamer().InitSections(false);
10118     Section = getStreamer().getCurrentSectionOnly();
10119   }
10120 
10121   assert(Section && "must have section to emit alignment");
10122   if (Section->UseCodeAlign())
10123     getStreamer().EmitCodeAlignment(2);
10124   else
10125     getStreamer().EmitValueToAlignment(2);
10126 
10127   return false;
10128 }
10129 
10130 /// parseDirectivePersonalityIndex
10131 ///   ::= .personalityindex index
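///
/// For example, ".personalityindex 0" selects the compact personality
/// routine with index 0 (__aeabi_unwind_cpp_pr0).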
10132 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
10133   MCAsmParser &Parser = getParser();
10134   bool HasExistingPersonality = UC.hasPersonality();
10135 
10136   UC.recordPersonalityIndex(L);
10137 
10138   if (!UC.hasFnStart()) {
10139     Error(L, ".fnstart must precede .personalityindex directive");
10140     return false;
10141   }
10142   if (UC.cantUnwind()) {
10143     Error(L, ".personalityindex cannot be used with .cantunwind");
10144     UC.emitCantUnwindLocNotes();
10145     return false;
10146   }
10147   if (UC.hasHandlerData()) {
10148     Error(L, ".personalityindex must precede .handlerdata directive");
10149     UC.emitHandlerDataLocNotes();
10150     return false;
10151   }
10152   if (HasExistingPersonality) {
10153     Error(L, "multiple personality directives");
10154     UC.emitPersonalityLocNotes();
10155     return false;
10156   }
10157 
10158   const MCExpr *IndexExpression;
10159   SMLoc IndexLoc = Parser.getTok().getLoc();
10160   if (Parser.parseExpression(IndexExpression)) {
10161     return false;
10162   }
10163 
10164   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
10165   if (!CE) {
10166     Error(IndexLoc, "index must be a constant number");
10167     return false;
10168   }
10169   if (CE->getValue() < 0 ||
10170       CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX) {
10171     Error(IndexLoc, "personality routine index should be in range [0-3]");
10172     return false;
10173   }
10174 
10175   getTargetStreamer().emitPersonalityIndex(CE->getValue());
10176   return false;
10177 }
10178 
10179 /// parseDirectiveUnwindRaw
10180 ///   ::= .unwind_raw offset, opcode [, opcode...]
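///
/// For example (illustrative), ".unwind_raw 0, 0xb0" records a stack offset
/// of 0 followed by the raw EHABI opcode byte 0xb0.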
10181 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
10182   MCAsmParser &Parser = getParser();
10183   if (!UC.hasFnStart()) {
10184     Error(L, ".fnstart must precede .unwind_raw directives");
10185     return false;
10186   }
10187 
10188   int64_t StackOffset;
10189 
10190   const MCExpr *OffsetExpr;
10191   SMLoc OffsetLoc = getLexer().getLoc();
10192   if (getLexer().is(AsmToken::EndOfStatement) ||
10193       getParser().parseExpression(OffsetExpr)) {
10194     Error(OffsetLoc, "expected expression");
10195     return false;
10196   }
10197 
10198   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10199   if (!CE) {
10200     Error(OffsetLoc, "offset must be a constant");
10201     return false;
10202   }
10203 
10204   StackOffset = CE->getValue();
10205 
10206   if (getLexer().isNot(AsmToken::Comma)) {
10207     Error(getLexer().getLoc(), "expected comma");
10208     return false;
10209   }
10210   Parser.Lex();
10211 
10212   SmallVector<uint8_t, 16> Opcodes;
10213   for (;;) {
10214     const MCExpr *OE;
10215 
10216     SMLoc OpcodeLoc = getLexer().getLoc();
10217     if (getLexer().is(AsmToken::EndOfStatement) || Parser.parseExpression(OE)) {
10218       Error(OpcodeLoc, "expected opcode expression");
10219       return false;
10220     }
10221 
10222     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
10223     if (!OC) {
10224       Error(OpcodeLoc, "opcode value must be a constant");
10225       return false;
10226     }
10227 
10228     const int64_t Opcode = OC->getValue();
10229     if (Opcode & ~0xff) {
10230       Error(OpcodeLoc, "invalid opcode");
10231       return false;
10232     }
10233 
10234     Opcodes.push_back(uint8_t(Opcode));
10235 
10236     if (getLexer().is(AsmToken::EndOfStatement))
10237       break;
10238 
10239     if (getLexer().isNot(AsmToken::Comma)) {
10240       Error(getLexer().getLoc(), "unexpected token in directive");
10241       return false;
10242     }
10243 
10244     Parser.Lex();
10245   }
10246 
10247   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
10248 
10249   Parser.Lex();
10250   return false;
10251 }
10252 
10253 /// parseDirectiveTLSDescSeq
10254 ///   ::= .tlsdescseq tls-variable
10255 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
10256   MCAsmParser &Parser = getParser();
10257 
10258   if (getLexer().isNot(AsmToken::Identifier)) {
10259     TokError("expected variable after '.tlsdescseq' directive");
10260     return false;
10261   }
10262 
10263   const MCSymbolRefExpr *SRE =
10264     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
10265                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10266   Lex();
10267 
10268   if (getLexer().isNot(AsmToken::EndOfStatement)) {
10269     Error(Parser.getTok().getLoc(), "unexpected token");
10270     return false;
10271   }
10272 
10273   getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10274   return false;
10275 }
10276 
10277 /// parseDirectiveMovSP
10278 ///  ::= .movsp reg [, #offset]
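///
/// For example (illustrative), ".movsp r4" tells the unwinder that the
/// stack pointer value is now kept in r4.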
10279 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10280   MCAsmParser &Parser = getParser();
10281   if (!UC.hasFnStart()) {
10282     Error(L, ".fnstart must precede .movsp directives");
10283     return false;
10284   }
10285   if (UC.getFPReg() != ARM::SP) {
10286     Error(L, "unexpected .movsp directive");
10287     return false;
10288   }
10289 
10290   SMLoc SPRegLoc = Parser.getTok().getLoc();
10291   int SPReg = tryParseRegister();
10292   if (SPReg == -1) {
10293     Error(SPRegLoc, "register expected");
10294     return false;
10295   }
10296 
10297   if (SPReg == ARM::SP || SPReg == ARM::PC) {
10298     Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10299     return false;
10300   }
10301 
10302   int64_t Offset = 0;
10303   if (Parser.getTok().is(AsmToken::Comma)) {
10304     Parser.Lex();
10305 
10306     if (Parser.getTok().isNot(AsmToken::Hash)) {
10307       Error(Parser.getTok().getLoc(), "expected #constant");
10308       return false;
10309     }
10310     Parser.Lex();
10311 
10312     const MCExpr *OffsetExpr;
10313     SMLoc OffsetLoc = Parser.getTok().getLoc();
10314     if (Parser.parseExpression(OffsetExpr)) {
10315       Error(OffsetLoc, "malformed offset expression");
10316       return false;
10317     }
10318 
10319     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10320     if (!CE) {
10321       Error(OffsetLoc, "offset must be an immediate constant");
10322       return false;
10323     }
10324 
10325     Offset = CE->getValue();
10326   }
10327 
10328   getTargetStreamer().emitMovSP(SPReg, Offset);
10329   UC.saveFPReg(SPReg);
10330 
10331   return false;
10332 }
10333 
10334 /// parseDirectiveObjectArch
10335 ///   ::= .object_arch name
10336 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10337   MCAsmParser &Parser = getParser();
10338   if (getLexer().isNot(AsmToken::Identifier)) {
10339     Error(getLexer().getLoc(), "unexpected token");
10340     return false;
10341   }
10342 
10343   StringRef Arch = Parser.getTok().getString();
10344   SMLoc ArchLoc = Parser.getTok().getLoc();
10345   Lex();
10346 
10347   unsigned ID = ARM::parseArch(Arch);
10348 
10349   if (ID == ARM::AK_INVALID) {
10350     Error(ArchLoc, "unknown architecture '" + Arch + "'");
10351     return false;
10352   }
10353 
10354   getTargetStreamer().emitObjectArch(ID);
10355 
10356   if (getLexer().isNot(AsmToken::EndOfStatement)) {
10357     Error(getLexer().getLoc(), "unexpected token");
10358   }
10359 
10360   return false;
10361 }
10362 
10363 /// parseDirectiveAlign
10364 ///   ::= .align
10365 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the
  // target-agnostic handling for this directive, which will handle it
  // correctly.
10368   if (getLexer().isNot(AsmToken::EndOfStatement))
10369     return true;
10370 
  // '.align' is handled specially for ARM targets to mean 2**2-byte (i.e.,
  // 4-byte) alignment.
10372   const MCSection *Section = getStreamer().getCurrentSectionOnly();
10373   assert(Section && "must have section to emit alignment");
10374   if (Section->UseCodeAlign())
10375     getStreamer().EmitCodeAlignment(4, 0);
10376   else
10377     getStreamer().EmitValueToAlignment(4, 0, 1, 0);
10378 
10379   return false;
10380 }
10381 
10382 /// parseDirectiveThumbSet
10383 ///  ::= .thumb_set name, value
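///
/// For example (illustrative names), ".thumb_set alias, func" defines
/// "alias" like .set does and additionally marks it as a Thumb function.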
10384 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
10385   MCAsmParser &Parser = getParser();
10386 
10387   StringRef Name;
10388   if (Parser.parseIdentifier(Name)) {
10389     TokError("expected identifier after '.thumb_set'");
10390     return false;
10391   }
10392 
10393   if (getLexer().isNot(AsmToken::Comma)) {
10394     TokError("expected comma after name '" + Name + "'");
10395     return false;
10396   }
10397   Lex();
10398 
10399   MCSymbol *Sym;
10400   const MCExpr *Value;
10401   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
10402                                                Parser, Sym, Value))
10403     return true;
10404 
10405   getTargetStreamer().emitThumbSet(Sym, Value);
10406   return false;
10407 }
10408 
10409 /// Force static initialization.
10410 extern "C" void LLVMInitializeARMAsmParser() {
10411   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
10412   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
10413   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
10414   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
10415 }
10416 
10417 #define GET_REGISTER_MATCHER
10418 #define GET_SUBTARGET_FEATURE_NAME
10419 #define GET_MATCHER_IMPLEMENTATION
10420 #include "ARMGenAsmMatcher.inc"
10421 
10422 // FIXME: This structure should be moved inside ARMTargetParser
10423 // when we start to table-generate them, and we can use the ARM
10424 // flags below, that were generated by table-gen.
10425 static const struct {
10426   const unsigned Kind;
10427   const uint64_t ArchCheck;
10428   const FeatureBitset Features;
10429 } Extensions[] = {
10430   { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} },
10431   { ARM::AEK_CRYPTO,  Feature_HasV8,
10432     {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
10433   { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
10434   { (ARM::AEK_HWDIV | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
10435     {ARM::FeatureHWDiv, ARM::FeatureHWDivARM} },
10436   { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
10437   { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
10438   { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
10439   // FIXME: Only available in A-class, isel not predicated
10440   { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
10441   { ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
10442   { ARM::AEK_RAS, Feature_HasV8, {ARM::FeatureRAS} },
10443   // FIXME: Unsupported extensions.
10444   { ARM::AEK_OS, Feature_None, {} },
10445   { ARM::AEK_IWMMXT, Feature_None, {} },
10446   { ARM::AEK_IWMMXT2, Feature_None, {} },
10447   { ARM::AEK_MAVERICK, Feature_None, {} },
10448   { ARM::AEK_XSCALE, Feature_None, {} },
10449 };
10450 
10451 /// parseDirectiveArchExtension
10452 ///   ::= .arch_extension [no]feature
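///
/// For example, ".arch_extension crc" enables the CRC extension and
/// ".arch_extension nocrc" disables it, subject to the checks below.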
10453 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
10454   MCAsmParser &Parser = getParser();
10455 
10456   if (getLexer().isNot(AsmToken::Identifier)) {
10457     Error(getLexer().getLoc(), "expected architecture extension name");
10458     return false;
10459   }
10460 
10461   StringRef Name = Parser.getTok().getString();
10462   SMLoc ExtLoc = Parser.getTok().getLoc();
10463   Lex();
10464 
10465   bool EnableFeature = true;
10466   if (Name.startswith_lower("no")) {
10467     EnableFeature = false;
10468     Name = Name.substr(2);
10469   }
10470   unsigned FeatureKind = ARM::parseArchExt(Name);
10471   if (FeatureKind == ARM::AEK_INVALID) {
10472     Error(ExtLoc, "unknown architectural extension: " + Name);
10473     return false;
10474   }
10475 
10476   for (const auto &Extension : Extensions) {
10477     if (Extension.Kind != FeatureKind)
10478       continue;
10479 
10480     if (Extension.Features.none()) {
10481       Error(ExtLoc, "unsupported architectural extension: " + Name);
10482       return false;
10483     }
10484 
10485     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck) {
10486       Error(ExtLoc, "architectural extension '" + Name + "' is not "
10487             "allowed for the current base architecture");
10488       return false;
10489     }
10490 
10491     MCSubtargetInfo &STI = copySTI();
10492     FeatureBitset ToggleFeatures = EnableFeature
10493       ? (~STI.getFeatureBits() & Extension.Features)
10494       : ( STI.getFeatureBits() & Extension.Features);
10495 
10496     uint64_t Features =
10497         ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
10498     setAvailableFeatures(Features);
10499     return false;
10500   }
10501 
10502   Error(ExtLoc, "unknown architectural extension: " + Name);
10503   return false;
10504 }
10505 
10506 // Define this matcher function after the auto-generated include so we
10507 // have the match class enum definitions.
10508 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
10509                                                   unsigned Kind) {
10510   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
10511   // If the kind is a token for a literal immediate, check if our asm
10512   // operand matches. This is for InstAliases which have a fixed-value
10513   // immediate in the syntax.
10514   switch (Kind) {
10515   default: break;
10516   case MCK__35_0:
10517     if (Op.isImm())
10518       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
10519         if (CE->getValue() == 0)
10520           return Match_Success;
10521     break;
10522   case MCK_ModImm:
10523     if (Op.isImm()) {
10524       const MCExpr *SOExpr = Op.getImm();
10525       int64_t Value;
10526       if (!SOExpr->evaluateAsAbsolute(Value))
10527         return Match_Success;
10528       assert((Value >= INT32_MIN && Value <= UINT32_MAX) &&
10529              "expression value must be representable in 32 bits");
10530     }
10531     break;
10532   case MCK_rGPR:
10533     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
10534       return Match_Success;
10535     break;
10536   case MCK_GPRPair:
10537     if (Op.isReg() &&
10538         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
10539       return Match_Success;
10540     break;
10541   }
10542   return Match_InvalidOperand;
10543 }
10544