1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "ARMFeatures.h"
11 #include "Utils/ARMBaseInfo.h"
12 #include "MCTargetDesc/ARMAddressingModes.h"
13 #include "MCTargetDesc/ARMBaseInfo.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/MC/MCContext.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/MC/MCInst.h"
30 #include "llvm/MC/MCInstrDesc.h"
31 #include "llvm/MC/MCInstrInfo.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
37 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
39 #include "llvm/MC/MCRegisterInfo.h"
40 #include "llvm/MC/MCSection.h"
41 #include "llvm/MC/MCStreamer.h"
42 #include "llvm/MC/MCSubtargetInfo.h"
43 #include "llvm/MC/MCSymbol.h"
44 #include "llvm/MC/SubtargetFeature.h"
45 #include "llvm/Support/ARMBuildAttributes.h"
46 #include "llvm/Support/ARMEHABI.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MathExtras.h"
52 #include "llvm/Support/SMLoc.h"
53 #include "llvm/Support/TargetParser.h"
54 #include "llvm/Support/TargetRegistry.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <algorithm>
57 #include <cassert>
58 #include <cstddef>
59 #include <cstdint>
60 #include <iterator>
61 #include <limits>
62 #include <memory>
63 #include <string>
64 #include <utility>
65 #include <vector>
66 
67 #define DEBUG_TYPE "asm-parser"
68 
69 using namespace llvm;
70 
71 namespace {
72 
73 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
74 
75 static cl::opt<ImplicitItModeTy> ImplicitItMode(
76     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
77     cl::desc("Allow conditional instructions outdside of an IT block"),
78     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
79                           "Accept in both ISAs, emit implicit ITs in Thumb"),
80                clEnumValN(ImplicitItModeTy::Never, "never",
81                           "Warn in ARM, reject in Thumb"),
82                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
83                           "Accept in ARM, reject in Thumb"),
84                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
85                           "Warn in ARM, emit implicit ITs in Thumb")));
86 
87 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
88                                         cl::init(false));
89 
90 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
91 
92 class UnwindContext {
93   using Locs = SmallVector<SMLoc, 4>;
94 
95   MCAsmParser &Parser;
96   Locs FnStartLocs;
97   Locs CantUnwindLocs;
98   Locs PersonalityLocs;
99   Locs PersonalityIndexLocs;
100   Locs HandlerDataLocs;
101   int FPReg;
102 
103 public:
104   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
105 
106   bool hasFnStart() const { return !FnStartLocs.empty(); }
107   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
108   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
109 
110   bool hasPersonality() const {
111     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
112   }
113 
114   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
115   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
116   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
117   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
118   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
119 
120   void saveFPReg(int Reg) { FPReg = Reg; }
121   int getFPReg() const { return FPReg; }
122 
123   void emitFnStartLocNotes() const {
124     for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
125          FI != FE; ++FI)
126       Parser.Note(*FI, ".fnstart was specified here");
127   }
128 
129   void emitCantUnwindLocNotes() const {
130     for (Locs::const_iterator UI = CantUnwindLocs.begin(),
131                               UE = CantUnwindLocs.end(); UI != UE; ++UI)
132       Parser.Note(*UI, ".cantunwind was specified here");
133   }
134 
135   void emitHandlerDataLocNotes() const {
136     for (Locs::const_iterator HI = HandlerDataLocs.begin(),
137                               HE = HandlerDataLocs.end(); HI != HE; ++HI)
138       Parser.Note(*HI, ".handlerdata was specified here");
139   }
140 
141   void emitPersonalityLocNotes() const {
142     for (Locs::const_iterator PI = PersonalityLocs.begin(),
143                               PE = PersonalityLocs.end(),
144                               PII = PersonalityIndexLocs.begin(),
145                               PIE = PersonalityIndexLocs.end();
146          PI != PE || PII != PIE;) {
147       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
148         Parser.Note(*PI++, ".personality was specified here");
149       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
150         Parser.Note(*PII++, ".personalityindex was specified here");
151       else
152         llvm_unreachable(".personality and .personalityindex cannot be "
153                          "at the same location");
154     }
155   }
156 
157   void reset() {
158     FnStartLocs = Locs();
159     CantUnwindLocs = Locs();
160     PersonalityLocs = Locs();
161     HandlerDataLocs = Locs();
162     PersonalityIndexLocs = Locs();
163     FPReg = ARM::SP;
164   }
165 };
166 
/// ARMAsmParser - Target assembly parser for ARM and Thumb. Parses
/// instructions and operands into MCInsts, tracks IT-block state (explicit
/// and implicit), and implements the ARM-specific assembler directives.
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI;
  UnwindContext UC;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases created via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // NOTE(review): consumed by onLabelParsed; presumably flags the next
  // parsed label as a Thumb function symbol — confirm against its definition.
  bool NextSymbolIsThumb;

  // True if implicit IT blocks may be created in Thumb mode, per the
  // -arm-implicit-it option.
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // True if conditional instructions are accepted in ARM mode, per the
  // -arm-implicit-it option.
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block.  ~0U if no
                              // active IT block.

    bool IsExplicit;          // true  - The IT instruction was present in the
                              //         input, we should not modify it.
                              // false - The IT instruction was added
                              //         implicitly, we can extend it if that
                              //         would be legal.
  } ITState;

  // Instructions held back while an implicit IT block is still open; flushed
  // together with the synthesized IT instruction.
  SmallVector<MCInst, 4> PendingConditionalInsts;

  // Emit the synthesized IT instruction followed by any buffered conditional
  // instructions, then close the implicit IT block.
  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    unsigned Mask = getITMaskEncoding();
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(Mask));
    Out.EmitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.EmitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  // IT-block state queries. CurPosition == ~0U means no active block.
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True if the current slot is the last one of the active IT block.
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Return the low-subreg of a given Q register.
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the encoding of the IT mask, as it will appear in an IT instruction.
  // For even condition codes the mask bits below the terminating 1 must be
  // inverted relative to the parsed 't'/'e' representation.
  unsigned getITMaskEncoding() {
    assert(inITBlock());
    unsigned Mask = ITState.Mask;
    unsigned TZ = countTrailingZeros(Mask);
    if ((ITState.Cond & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      Mask ^= (0xE << TZ) & 0xF;
    }
    return Mask;
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit;
    if (ITState.CurPosition == 1)
      MaskBit = 1;
    else
      MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;

    return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond == ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8;
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask. The mask
  // should be in the parsed format, with a 1 implying 't', regardless of the
  // low bit of the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
  }

  // Diagnostic helpers forwarding to the underlying MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Error(L, Msg, Range);
  }

  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo, bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo);

  // Operand parsing helpers.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);
  // Directive handlers; each returns true on error.
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
                             bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
                                     OperandVector &Operands);
  // Subtarget feature / mode predicates, queried from the current STI.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[ARM::ModeThumb];
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasThumb() const {
    return getSTI().getFeatureBits()[ARM::HasV4TOps];
  }

  bool hasThumb2() const {
    return getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasV6Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6Ops];
  }

  bool hasV6T2Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
  }

  bool hasV6MOps() const {
    return getSTI().getFeatureBits()[ARM::HasV6MOps];
  }

  bool hasV7Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV7Ops];
  }

  bool hasV8Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV8Ops];
  }

  bool hasV8MBaseline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
  }

  bool hasV8MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
  }

  bool has8MSecExt() const {
    return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
  }

  bool hasARM() const {
    return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
  }

  bool hasDSP() const {
    return getSTI().getFeatureBits()[ARM::FeatureDSP];
  }

  bool hasD16() const {
    return getSTI().getFeatureBits()[ARM::FeatureD16];
  }

  bool hasV8_1aOps() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
  }

  bool hasRAS() const {
    return getSTI().getFeatureBits()[ARM::FeatureRAS];
  }

  // Toggle between ARM and Thumb mode and recompute the available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().getFeatureBits()[ARM::FeatureMClass];
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced from the tablegen'd matcher.
  OperandMatchResultTy parseITCondCode(OperandVector &);
  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
                                   int High);
  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(OperandVector &);
  OperandMatchResultTy parseShifterImm(OperandVector &);
  OperandMatchResultTy parseRotImm(OperandVector &);
  OperandMatchResultTy parseModImm(OperandVector &);
  OperandMatchResultTy parseBitfield(OperandVector &);
  OperandMatchResultTy parsePostIdxReg(OperandVector &);
  OperandMatchResultTy parseAM3Offset(OperandVector &);
  OperandMatchResultTy parseFPImm(OperandVector &);
  OperandMatchResultTy parseVectorList(OperandVector &);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool isITBlockTerminator(MCInst &Inst) const;
  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);

public:
  // Target-specific match-failure kinds, extended by the tablegen'd
  // operand diagnostic types.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  // A near-miss diagnostic rendered to text, ready to be reported.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  void onLabelParsed(MCSymbol *Symbol) override;
};
633 
634 /// ARMOperand - Instances of this class represent a parsed ARM machine
635 /// operand.
636 class ARMOperand : public MCParsedAsmOperand {
  // Discriminator selecting which member of the union below is active.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;

  // Source locations of the operand's first/last tokens and, for memory
  // operands, of the alignment specifier.
  SMLoc StartLoc, EndLoc, AlignmentLoc;
  // Registers for the k_*RegisterList kinds (kept outside the union).
  SmallVector<unsigned, 8> Registers;

  // Condition-code operand.
  struct CCOp {
    ARMCC::CondCodes Val;
  };

  // Coprocessor number or coprocessor register.
  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  // Parsed IT condition mask ('t'/'e' pattern), 4 bits.
  struct ITMaskOp {
    unsigned Mask:4;
  };

  // Memory barrier option (dmb/dsb operand).
  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  // Instruction synchronization barrier option (isb operand).
  struct ISBOptOp {
    ARM_ISB::InstSyncBOpt Val;
  };

  // cps interrupt flags.
  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  // msr/mrs mask value.
  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  // A raw token (mnemonic suffix, '#', etc.); not owned.
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCConstantExpr *OffsetImm;  // Offset immediate value
    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;        // shift for OffsetReg.
    unsigned Alignment;       // 0 = no alignment specified
    // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
  };

  // Post-indexed addressing: register with optional add/sub and shift.
  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  // Shifter immediate (e.g. "lsl #4" / "asr #4" on a shifted operand).
  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  // Register shifted by a register (e.g. "r0, lsl r1").
  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  // Register shifted by an immediate (e.g. "r0, lsl #2").
  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  // Modified immediate: 8-bit value with an even rotation.
  struct ModImmOp {
    unsigned Bits;
    unsigned Rot;
  };

  // Bitfield descriptor for bfi/bfc/sbfx/ubfx.
  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  // Value storage; Kind above selects the active member.
  union {
    struct CCOp CC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;
    struct BitfieldOp Bitfield;
  };
811 
812 public:
813   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
814 
815   /// getStartLoc - Get the location of the first token of this operand.
816   SMLoc getStartLoc() const override { return StartLoc; }
817 
818   /// getEndLoc - Get the location of the last token of this operand.
819   SMLoc getEndLoc() const override { return EndLoc; }
820 
821   /// getLocRange - Get the range between the first and last token of this
822   /// operand.
823   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
824 
825   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
826   SMLoc getAlignmentLoc() const {
827     assert(Kind == k_Memory && "Invalid access!");
828     return AlignmentLoc;
829   }
830 
  // Typed accessors for the operand payload. Each asserts that this
  // operand's Kind matches before reading the corresponding union member.

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  // Valid for both coprocessor-number and coprocessor-register operands.
  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  // A plain register or the optional carry-setting (CCOut) register.
  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  // Constant-pool immediates share the Imm storage but a distinct Kind.
  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
896 
  // Simple kind tests used by the auto-generated asm-matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  // The condition code of an IT instruction is stored as a plain
  // condition-code operand, hence the same Kind as isCondCode().
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
907 
908   bool isARMBranchTarget() const {
909     if (!isImm()) return false;
910 
911     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
912       return CE->getValue() % 4 == 0;
913     return true;
914   }
915 
916 
917   bool isThumbBranchTarget() const {
918     if (!isImm()) return false;
919 
920     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
921       return CE->getValue() % 2 == 0;
922     return true;
923   }
924 
  // Checks whether this operand is an unsigned offset which fits in a field
  // of specified width and is scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // A symbolic reference is accepted; it will be resolved by a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      // Must be non-negative, aligned to the scale, and within the field.
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }
939 
  // Checks whether this operand is a signed offset which fits in a field
  // of specified width and is scaled by a specific number of bits.
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // A symbolic reference is accepted; it will be resolved by a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      // Two's-complement range of a `width`-bit field, scaled by Align.
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }
955 
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or
  // a relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // Symbolic references are resolved later by a fixup.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isMem()) {
      // Only an immediate offset from PC qualifies.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      Val = Memory.OffsetImm->getValue();
    }
    else return false;
    // Word-aligned offset in [0, 1020] (8 bits scaled by 4).
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
976 
977   bool isFPImm() const {
978     if (!isImm()) return false;
979     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
980     if (!CE) return false;
981     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
982     return Val != -1;
983   }
984 
  // True if this is a constant immediate in the inclusive range [N, M].
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  // As isImmediate, but additionally requires the value be a multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
1002 
  // Fixed-point fbits operand for 16-bit VCVT.
  // NOTE(review): isImmediate<> is inclusive, so this accepts 17 although
  // fbits for 16-bit conversions are architecturally 0-16 — verify intent.
  bool isFBits16() const {
    return isImmediate<0, 17>();
  }
  // Fixed-point fbits operand for 32-bit VCVT.
  // NOTE(review): inclusive upper bound 33 vs. architectural 1-32 — verify.
  bool isFBits32() const {
    return isImmediate<1, 33>();
  }
  // Signed multiple-of-4 immediate in [-1020, 1020].
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  // Negated multiple-of-4 immediate: matches when -Value is in (0, 508].
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  // Negated immediate: matches when -Value is in (0, 4095].
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  // 16-bit immediate with a non-zero top byte; non-constant expressions are
  // accepted and resolved later by a fixup.
  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  // Any 16-bit immediate; non-constant expressions are accepted and
  // resolved later by a fixup.
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }

  // NOTE(review): inclusive bound 0xffffff + 1 admits 0x1000000, one past
  // the 24-bit range — verify intent.
  bool isImm24bit() const {
    return isImmediate<0, 0xffffff + 1>();
  }

  // Thumb shift-right amount.
  // NOTE(review): inclusive upper bound 33 vs. architectural 1-32 — verify.
  bool isImmThumbSR() const {
    return isImmediate<1, 33>();
  }

  // PKHBT LSL amount. NOTE(review): inclusive bound 32 vs. 0-31 — verify.
  bool isPKHLSLImm() const {
    return isImmediate<0, 32>();
  }

  // PKHTB ASR amount. NOTE(review): inclusive bound 33 vs. 1-32 — verify.
  bool isPKHASRImm() const {
    return isImmediate<0, 33>();
  }
1086 
  // Label operand for ADR: a symbolic expression, or a constant encodable
  // as an ARM modified immediate (possibly negated).
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    // If it is a constant, it must fit into a modified immediate encoding.
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return (ARM_AM::getSOImmVal(Value) != -1 ||
            ARM_AM::getSOImmVal(-Value) != -1);
  }

  // Thumb2 modified immediate (t2_so_imm).
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
1118 
  // Constant whose bitwise complement (but not the value itself) is a
  // Thumb2 modified immediate; enables e.g. MVN-based selection.
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  // Constant whose negation (but not the value itself) is a Thumb2
  // modified immediate; enables e.g. SUB-for-ADD selection.
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }

  // SETEND takes a single 0/1 immediate (LE/BE).
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
1145 
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  // A memory operand whose base and (optional) offset registers are both
  // general-purpose registers.
  bool isMem() const override {
    if (Kind != k_Memory)
      return false;
    if (Memory.BaseRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
      return false;
    if (Memory.OffsetRegNum &&
        !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
      return false;
    return true;
  }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  // Register shifted by a register (e.g. "r0, lsl r1"); both must be GPRs.
  bool isRegShiftedReg() const {
    return Kind == k_ShiftedRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.SrcReg) &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedReg.ShiftReg);
  }
  // Register shifted by an immediate (e.g. "r0, lsl #2"); source must be GPR.
  bool isRegShiftedImm() const {
    return Kind == k_ShiftedImmediate &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
               RegShiftedImm.SrcReg);
  }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }
1179 
  // Constant whose bitwise complement is an ARM modified immediate
  // (for MVN-based selection).
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // Constant whose negation (but not the value itself) is an ARM modified
  // immediate (for SUB-for-ADD style selection).
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }

  // Negated Thumb immediate: -Value in [1, 7].
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // Negated Thumb immediate: -Value in [8, 255].
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }
1212 
  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  // Post-indexed register (optionally shifted); register must be a GPR.
  bool isPostIdxRegShifted() const {
    return Kind == k_PostIndexRegister &&
           ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
  }
  // Post-indexed register with no shift applied.
  bool isPostIdxReg() const {
    return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // Bare [Rn] memory operand. If alignOK, any alignment qualifier is
  // accepted; otherwise the alignment must equal Alignment exactly.
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  // PC-relative memory operand with a 12-bit immediate offset.
  bool isMemPCRelImm12() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the canonical encoding of #-0.
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
1241 
1242   bool isAlignedMemory() const {
1243     return isMemNoOffset(true);
1244   }
1245 
1246   bool isAlignedMemoryNone() const {
1247     return isMemNoOffset(false, 0);
1248   }
1249 
1250   bool isDupAlignedMemoryNone() const {
1251     return isMemNoOffset(false, 0);
1252   }
1253 
1254   bool isAlignedMemory16() const {
1255     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1256       return true;
1257     return isMemNoOffset(false, 0);
1258   }
1259 
1260   bool isDupAlignedMemory16() const {
1261     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1262       return true;
1263     return isMemNoOffset(false, 0);
1264   }
1265 
1266   bool isAlignedMemory32() const {
1267     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1268       return true;
1269     return isMemNoOffset(false, 0);
1270   }
1271 
1272   bool isDupAlignedMemory32() const {
1273     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1274       return true;
1275     return isMemNoOffset(false, 0);
1276   }
1277 
1278   bool isAlignedMemory64() const {
1279     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1280       return true;
1281     return isMemNoOffset(false, 0);
1282   }
1283 
1284   bool isDupAlignedMemory64() const {
1285     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1286       return true;
1287     return isMemNoOffset(false, 0);
1288   }
1289 
1290   bool isAlignedMemory64or128() const {
1291     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1292       return true;
1293     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1294       return true;
1295     return isMemNoOffset(false, 0);
1296   }
1297 
1298   bool isDupAlignedMemory64or128() const {
1299     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1300       return true;
1301     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1302       return true;
1303     return isMemNoOffset(false, 0);
1304   }
1305 
1306   bool isAlignedMemory64or128or256() const {
1307     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1308       return true;
1309     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1310       return true;
1311     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1312       return true;
1313     return isMemNoOffset(false, 0);
1314   }
1315 
  // ARM addressing mode 2: [Rn, +/-Rm shift] or [Rn, #imm] with imm in
  // [-4095, 4095]; no alignment qualifier allowed.
  bool isAddrMode2() const {
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }

  // Stand-alone post-indexed immediate for addressing mode 2
  // (#-0 is encoded as INT32_MIN).
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }

  // ARM addressing mode 3: [Rn, +/-Rm] or [Rn, #imm] with imm in
  // [-255, 255]; no shifts or alignment qualifiers allowed.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
    // have to check for this too.
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // Post-indexed offset for addressing mode 3: a (non-shifted) register,
  // or an immediate in [-255, 255].
  bool isAM3Offset() const {
    if (isPostIdxReg())
      return true;
    if (!isImm())
      return false;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }
1369 
  // ARM addressing mode 5 (VFP load/store): [Rn, #imm] with imm a multiple
  // of 4 in [-1020, 1020]; no register offset or alignment qualifier.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == std::numeric_limits<int32_t>::min();
  }

  // FP16 variant of addressing mode 5: imm a multiple of 2 in [-510, 510].
  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
           Val == std::numeric_limits<int32_t>::min();
  }
1401 
1402   bool isMemTBB() const {
1403     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1404         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1405       return false;
1406     return true;
1407   }
1408 
1409   bool isMemTBH() const {
1410     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1411         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1412         Memory.Alignment != 0 )
1413       return false;
1414     return true;
1415   }
1416 
1417   bool isMemRegOffset() const {
1418     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1419       return false;
1420     return true;
1421   }
1422 
1423   bool isT2MemRegOffset() const {
1424     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1425         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1426       return false;
1427     // Only lsl #{0, 1, 2, 3} allowed.
1428     if (Memory.ShiftType == ARM_AM::no_shift)
1429       return true;
1430     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1431       return false;
1432     return true;
1433   }
1434 
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both registers must be in the low-register range (r0-r7).
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  // Thumb [Rn, #imm] with low base register and word-scaled offset.
  bool isMemThumbRIs4() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }

  // Thumb [Rn, #imm] with low base register and halfword-scaled offset.
  bool isMemThumbRIs2() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }

  // Thumb [Rn, #imm] with low base register and byte offset.
  bool isMemThumbRIs1() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }

  // Thumb SP-relative load/store: [sp, #imm].
  bool isMemThumbSPI() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
1484 
  // [Rn, #imm] with imm a multiple of 4 in [-1020, 1020].
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // [Rn, #imm] with imm a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }

  // [Rn, #imm] with imm in [-255, 255]; base may not be PC.
  bool isMemImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -256 && Val < 256);
  }

  // [Rn, #imm] with imm in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }

  // [Rn, #-imm] with imm in [-255, -1]; a missing offset does NOT match.
  bool isMemNegImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -256 && Val < 0);
  }

  // [Rn, #imm] with imm in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }

  // [Rn, #imm] with imm in [-4095, 4095], or a symbolic label reference.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  bool isConstPoolAsmImm() const {
    // Delay processing of Constant Pool Immediate, this will turn into
    // a constant. Match no other operand
    return (isConstantPoolImm());
  }
1574 
  // Post-indexed immediate in [-255, 255] (#-0 encoded as INT32_MIN).
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  // Post-indexed immediate, multiple of 4 in [-1020, 1020]
  // (#-0 encoded as INT32_MIN).
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1596 
1597   // NEON operands.
1598   bool isSingleSpacedVectorList() const {
1599     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1600   }
1601 
1602   bool isDoubleSpacedVectorList() const {
1603     return Kind == k_VectorList && VectorList.isDoubleSpaced;
1604   }
1605 
1606   bool isVecListOneD() const {
1607     if (!isSingleSpacedVectorList()) return false;
1608     return VectorList.Count == 1;
1609   }
1610 
1611   bool isVecListDPair() const {
1612     if (!isSingleSpacedVectorList()) return false;
1613     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1614               .contains(VectorList.RegNum));
1615   }
1616 
1617   bool isVecListThreeD() const {
1618     if (!isSingleSpacedVectorList()) return false;
1619     return VectorList.Count == 3;
1620   }
1621 
1622   bool isVecListFourD() const {
1623     if (!isSingleSpacedVectorList()) return false;
1624     return VectorList.Count == 4;
1625   }
1626 
1627   bool isVecListDPairSpaced() const {
1628     if (Kind != k_VectorList) return false;
1629     if (isSingleSpacedVectorList()) return false;
1630     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1631               .contains(VectorList.RegNum));
1632   }
1633 
1634   bool isVecListThreeQ() const {
1635     if (!isDoubleSpacedVectorList()) return false;
1636     return VectorList.Count == 3;
1637   }
1638 
1639   bool isVecListFourQ() const {
1640     if (!isDoubleSpacedVectorList()) return false;
1641     return VectorList.Count == 4;
1642   }
1643 
1644   bool isSingleSpacedVectorAllLanes() const {
1645     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1646   }
1647 
1648   bool isDoubleSpacedVectorAllLanes() const {
1649     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1650   }
1651 
1652   bool isVecListOneDAllLanes() const {
1653     if (!isSingleSpacedVectorAllLanes()) return false;
1654     return VectorList.Count == 1;
1655   }
1656 
1657   bool isVecListDPairAllLanes() const {
1658     if (!isSingleSpacedVectorAllLanes()) return false;
1659     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1660               .contains(VectorList.RegNum));
1661   }
1662 
1663   bool isVecListDPairSpacedAllLanes() const {
1664     if (!isDoubleSpacedVectorAllLanes()) return false;
1665     return VectorList.Count == 2;
1666   }
1667 
1668   bool isVecListThreeDAllLanes() const {
1669     if (!isSingleSpacedVectorAllLanes()) return false;
1670     return VectorList.Count == 3;
1671   }
1672 
1673   bool isVecListThreeQAllLanes() const {
1674     if (!isDoubleSpacedVectorAllLanes()) return false;
1675     return VectorList.Count == 3;
1676   }
1677 
1678   bool isVecListFourDAllLanes() const {
1679     if (!isSingleSpacedVectorAllLanes()) return false;
1680     return VectorList.Count == 4;
1681   }
1682 
1683   bool isVecListFourQAllLanes() const {
1684     if (!isDoubleSpacedVectorAllLanes()) return false;
1685     return VectorList.Count == 4;
1686   }
1687 
1688   bool isSingleSpacedVectorIndexed() const {
1689     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1690   }
1691 
1692   bool isDoubleSpacedVectorIndexed() const {
1693     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1694   }
1695 
1696   bool isVecListOneDByteIndexed() const {
1697     if (!isSingleSpacedVectorIndexed()) return false;
1698     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1699   }
1700 
1701   bool isVecListOneDHWordIndexed() const {
1702     if (!isSingleSpacedVectorIndexed()) return false;
1703     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1704   }
1705 
1706   bool isVecListOneDWordIndexed() const {
1707     if (!isSingleSpacedVectorIndexed()) return false;
1708     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1709   }
1710 
1711   bool isVecListTwoDByteIndexed() const {
1712     if (!isSingleSpacedVectorIndexed()) return false;
1713     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1714   }
1715 
1716   bool isVecListTwoDHWordIndexed() const {
1717     if (!isSingleSpacedVectorIndexed()) return false;
1718     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1719   }
1720 
1721   bool isVecListTwoQWordIndexed() const {
1722     if (!isDoubleSpacedVectorIndexed()) return false;
1723     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1724   }
1725 
1726   bool isVecListTwoQHWordIndexed() const {
1727     if (!isDoubleSpacedVectorIndexed()) return false;
1728     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1729   }
1730 
1731   bool isVecListTwoDWordIndexed() const {
1732     if (!isSingleSpacedVectorIndexed()) return false;
1733     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1734   }
1735 
1736   bool isVecListThreeDByteIndexed() const {
1737     if (!isSingleSpacedVectorIndexed()) return false;
1738     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1739   }
1740 
1741   bool isVecListThreeDHWordIndexed() const {
1742     if (!isSingleSpacedVectorIndexed()) return false;
1743     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1744   }
1745 
1746   bool isVecListThreeQWordIndexed() const {
1747     if (!isDoubleSpacedVectorIndexed()) return false;
1748     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1749   }
1750 
1751   bool isVecListThreeQHWordIndexed() const {
1752     if (!isDoubleSpacedVectorIndexed()) return false;
1753     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1754   }
1755 
1756   bool isVecListThreeDWordIndexed() const {
1757     if (!isSingleSpacedVectorIndexed()) return false;
1758     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1759   }
1760 
1761   bool isVecListFourDByteIndexed() const {
1762     if (!isSingleSpacedVectorIndexed()) return false;
1763     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1764   }
1765 
1766   bool isVecListFourDHWordIndexed() const {
1767     if (!isSingleSpacedVectorIndexed()) return false;
1768     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1769   }
1770 
1771   bool isVecListFourQWordIndexed() const {
1772     if (!isDoubleSpacedVectorIndexed()) return false;
1773     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1774   }
1775 
1776   bool isVecListFourQHWordIndexed() const {
1777     if (!isDoubleSpacedVectorIndexed()) return false;
1778     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1779   }
1780 
1781   bool isVecListFourDWordIndexed() const {
1782     if (!isSingleSpacedVectorIndexed()) return false;
1783     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1784   }
1785 
1786   bool isVectorIndex8() const {
1787     if (Kind != k_VectorIndex) return false;
1788     return VectorIndex.Val < 8;
1789   }
1790 
1791   bool isVectorIndex16() const {
1792     if (Kind != k_VectorIndex) return false;
1793     return VectorIndex.Val < 4;
1794   }
1795 
1796   bool isVectorIndex32() const {
1797     if (Kind != k_VectorIndex) return false;
1798     return VectorIndex.Val < 2;
1799   }
1800   bool isVectorIndex64() const {
1801     if (Kind != k_VectorIndex) return false;
1802     return VectorIndex.Val < 1;
1803   }
1804 
1805   bool isNEONi8splat() const {
1806     if (!isImm()) return false;
1807     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1808     // Must be a constant.
1809     if (!CE) return false;
1810     int64_t Value = CE->getValue();
1811     // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1812     // value.
1813     return Value >= 0 && Value < 256;
1814   }
1815 
1816   bool isNEONi16splat() const {
1817     if (isNEONByteReplicate(2))
1818       return false; // Leave that for bytes replication and forbid by default.
1819     if (!isImm())
1820       return false;
1821     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1822     // Must be a constant.
1823     if (!CE) return false;
1824     unsigned Value = CE->getValue();
1825     return ARM_AM::isNEONi16splat(Value);
1826   }
1827 
1828   bool isNEONi16splatNot() const {
1829     if (!isImm())
1830       return false;
1831     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1832     // Must be a constant.
1833     if (!CE) return false;
1834     unsigned Value = CE->getValue();
1835     return ARM_AM::isNEONi16splat(~Value & 0xffff);
1836   }
1837 
1838   bool isNEONi32splat() const {
1839     if (isNEONByteReplicate(4))
1840       return false; // Leave that for bytes replication and forbid by default.
1841     if (!isImm())
1842       return false;
1843     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1844     // Must be a constant.
1845     if (!CE) return false;
1846     unsigned Value = CE->getValue();
1847     return ARM_AM::isNEONi32splat(Value);
1848   }
1849 
1850   bool isNEONi32splatNot() const {
1851     if (!isImm())
1852       return false;
1853     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1854     // Must be a constant.
1855     if (!CE) return false;
1856     unsigned Value = CE->getValue();
1857     return ARM_AM::isNEONi32splat(~Value);
1858   }
1859 
1860   static bool isValidNEONi32vmovImm(int64_t Value) {
1861     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1862     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1863     return ((Value & 0xffffffffffffff00) == 0) ||
1864            ((Value & 0xffffffffffff00ff) == 0) ||
1865            ((Value & 0xffffffffff00ffff) == 0) ||
1866            ((Value & 0xffffffff00ffffff) == 0) ||
1867            ((Value & 0xffffffffffff00ff) == 0xff) ||
1868            ((Value & 0xffffffffff00ffff) == 0xffff);
1869   }
1870 
  // Return true if the immediate is NumElems copies of the same Width-bit
  // element. If Inv, the complement of the immediate is tested instead.
  // If !AllowMinusOne, an all-ones element is rejected. For 16-bit
  // elements only one of the two bytes may be non-zero; 32-bit elements
  // must additionally satisfy isValidNEONi32vmovImm.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv,
                       bool AllowMinusOne) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    if (!AllowMinusOne && Elem == Mask)
      return false;
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // Every subsequent Width-bit chunk must match the first. Note Value
    // is signed, so chunks past the written bits see sign extension.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }
1905 
1906   bool isNEONByteReplicate(unsigned NumBytes) const {
1907     return isNEONReplicate(8, NumBytes, false, true);
1908   }
1909 
  // Sanity-check the template arguments of isNEONmovReplicate /
  // isNEONinvReplicate: element width must be a legal NEON width and
  // strictly smaller than the result width.
  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }
1917 
1918   template<unsigned FromW, unsigned ToW>
1919   bool isNEONmovReplicate() const {
1920     checkNeonReplicateArgs(FromW, ToW);
1921     bool AllowMinusOne = ToW != 64;
1922     return isNEONReplicate(FromW, ToW / FromW, false, AllowMinusOne);
1923   }
1924 
1925   template<unsigned FromW, unsigned ToW>
1926   bool isNEONinvReplicate() const {
1927     checkNeonReplicateArgs(FromW, ToW);
1928     return isNEONReplicate(FromW, ToW / FromW, true, true);
1929   }
1930 
1931   bool isNEONi32vmov() const {
1932     if (isNEONByteReplicate(4))
1933       return false; // Let it to be classified as byte-replicate case.
1934     if (!isImm())
1935       return false;
1936     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1937     // Must be a constant.
1938     if (!CE)
1939       return false;
1940     return isValidNEONi32vmovImm(CE->getValue());
1941   }
1942 
1943   bool isNEONi32vmovNeg() const {
1944     if (!isImm()) return false;
1945     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1946     // Must be a constant.
1947     if (!CE) return false;
1948     return isValidNEONi32vmovImm(~CE->getValue());
1949   }
1950 
1951   bool isNEONi64splat() const {
1952     if (!isImm()) return false;
1953     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1954     // Must be a constant.
1955     if (!CE) return false;
1956     uint64_t Value = CE->getValue();
1957     // i64 value with each byte being either 0 or 0xff.
1958     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1959       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1960     return true;
1961   }
1962 
1963   template<int64_t Angle, int64_t Remainder>
1964   bool isComplexRotation() const {
1965     if (!isImm()) return false;
1966 
1967     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1968     if (!CE) return false;
1969     uint64_t Value = CE->getValue();
1970 
1971     return (Value % Angle == Remainder && Value <= 270);
1972   }
1973 
1974   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1975     // Add as immediates when possible.  Null MCExpr = 0.
1976     if (!Expr)
1977       Inst.addOperand(MCOperand::createImm(0));
1978     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1979       Inst.addOperand(MCOperand::createImm(CE->getValue()));
1980     else
1981       Inst.addOperand(MCOperand::createExpr(Expr));
1982   }
1983 
  // ARM-mode branch target: forwarded via addExpr so labels stay
  // symbolic for later fixup.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1988 
  // Thumb-mode branch target: forwarded via addExpr so labels stay
  // symbolic for later fixup.
  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1993 
1994   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1995     assert(N == 2 && "Invalid number of operands!");
1996     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1997     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1998     Inst.addOperand(MCOperand::createReg(RegNum));
1999   }
2000 
  // Coprocessor number, encoded as a plain immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }
2005 
  // Coprocessor register, encoded as a plain immediate (not an MCReg).
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }
2010 
  // Coprocessor option ({imm} syntax), encoded as an immediate.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }
2015 
  // IT-instruction mask bits, encoded as an immediate.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }
2020 
  // IT condition code: only the code itself, no flag-register slot
  // (contrast with addCondCodeOperands).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }
2025 
  // Optional flag-setting output register (CPSR or none).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
2030 
  // Plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
2035 
2036   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2037     assert(N == 3 && "Invalid number of operands!");
2038     assert(isRegShiftedReg() &&
2039            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2040     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2041     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2042     Inst.addOperand(MCOperand::createImm(
2043       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2044   }
2045 
2046   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2047     assert(N == 2 && "Invalid number of operands!");
2048     assert(isRegShiftedImm() &&
2049            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2050     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2051     // Shift of #32 is encoded as 0 where permitted
2052     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2053     Inst.addOperand(MCOperand::createImm(
2054       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2055   }
2056 
  // Shifter immediate: bit 5 selects ASR vs LSL, low bits hold the
  // shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
2062 
2063   void addRegListOperands(MCInst &Inst, unsigned N) const {
2064     assert(N == 1 && "Invalid number of operands!");
2065     const SmallVectorImpl<unsigned> &RegList = getRegList();
2066     for (SmallVectorImpl<unsigned>::const_iterator
2067            I = RegList.begin(), E = RegList.end(); I != E; ++I)
2068       Inst.addOperand(MCOperand::createReg(*I));
2069   }
2070 
  // D-register list: same encoding as a plain register list.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2074 
  // S-register list: same encoding as a plain register list.
  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2078 
  // Rotate immediate for extend instructions (0/8/16/24).
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }
2084 
  // Modified immediate: 8-bit value in bits [7:0] with the rotation in
  // the bits above (Rot << 7).
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup): plain immediates go through addExpr.
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }
2094 
  // Modified-immediate encoding of the complement of the source constant
  // (for MVN-style aliases).
  // NOTE(review): CE is dereferenced without a null check — presumably
  // guaranteed constant by the matching is*() predicate; confirm.
  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }
2101 
  // Modified-immediate encoding of the negation of the source constant
  // (for SUB-from-ADD style aliases).
  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }
2108 
  // Negated immediate (written value is negative; encoded positive).
  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2115 
  // Negated immediate (written value is negative; encoded positive).
  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2122 
  // Bitfield operand: emit an inverted mask covering [lsb, lsb+width).
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // The shift pipeline below leaves ones exactly in [lsb, lsb+width);
    // the outer ~ then clears them.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }
2133 
  // Generic immediate: constants become imm operands, everything else
  // stays a symbolic expression (see addExpr).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2138 
  // Fixed-point fraction bits for 16-bit conversions: encoded as
  // 16 - value.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }
2144 
  // Fixed-point fraction bits for 32-bit conversions: encoded as
  // 32 - value.
  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }
2150 
  // VFP immediate: re-encode the 32-bit float bit pattern as the 8-bit
  // FP-immediate form via ARM_AM::getFP32Imm.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2157 
  // Byte-offset immediate for LDRD/STRD-style operands; stored unscaled.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2165 
  // Word-scaled immediate in [0, 1020].
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2173 
  // Negated word-scaled immediate in [0, 508].
  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }
2181 
  // Word-scaled immediate in [0, 508].
  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2189 
  // Immediate in [1, 16], stored biased as value-1.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2197 
  // Immediate in [1, 32], stored biased as value-1.
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2205 
  // Thumb shift-right amount: 1-32, where 32 encodes as 0.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
  }
2214 
  // PKH ASR shift amount: 1-32, where 32 encodes as 0.
  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
  }
2223 
  // Thumb2 modified immediate of the complement of the written value.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }
2231 
  // Thumb2 modified immediate of the negation of the written value.
  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2239 
  // 12-bit immediate written negated in the source.
  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-CE->getValue()));
  }
2247 
2248   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2249     if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2250       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2251       return;
2252     }
2253 
2254     const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2255     assert(SR && "Unknown value type!");
2256     Inst.addOperand(MCOperand::createExpr(SR));
2257   }
2258 
  // Thumb PC-relative memory operand: emitted as a constant, a symbol
  // reference, or (for the memory form) the parsed offset immediate.
  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (isImm()) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
      if (CE) {
        Inst.addOperand(MCOperand::createImm(CE->getValue()));
        return;
      }

      const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);

      assert(SR && "Unknown value type!");
      Inst.addOperand(MCOperand::createExpr(SR));
      return;
    }

    assert(isMem()  && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
  }
2279 
  // Memory barrier option (e.g. for DMB/DSB syntax), as an immediate.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }
2284 
  // Instruction synchronization barrier option, as an immediate.
  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }
2289 
  // Memory operand with no offset: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2294 
  // PC-relative 12-bit offset: only the immediate is emitted.
  // NOTE(review): Memory.OffsetImm is dereferenced without a null check —
  // presumably guaranteed by the matching is*() predicate; confirm.
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    int32_t Imm = Memory.OffsetImm->getValue();
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2300 
  // ADR target: labels remain symbolic for fixup; constants are emitted
  // directly.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(isImm() && "Not an immediate!");

    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (!isa<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2316 
  // Alignment-qualified memory operand: base register plus alignment
  // immediate. Shared by all the aligned-memory adders in this class.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.Alignment));
  }
2322 
  // The wrappers below all share addAlignedMemoryOperands' encoding; one
  // adder exists per alignment-constrained operand class (presumably so
  // generated matcher code can name each class — confirm against the
  // tablegen'd matcher).

  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2366 
  // Addressing mode 2: base reg, offset reg, and a packed AM2 opcode
  // word (add/sub flag, immediate or shift info).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: INT32_MIN is the parser's sentinel for it.
      if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2386 
  // Post-indexed AM2 immediate offset: no offset register (0) plus a
  // packed AM2 opcode word.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: INT32_MIN is the parser's sentinel for it.
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2400 
  // Addressing mode 3: base reg, offset reg, and a packed AM3 opcode
  // word. A bare immediate operand is treated as a label reference.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createReg(0));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: INT32_MIN is the parser's sentinel for it.
      if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2429 
  // Post-indexed AM3 offset: either a post-index register or a constant
  // immediate, each paired with a packed AM3 opcode word.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: INT32_MIN is the parser's sentinel for it.
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2451 
  // Addressing mode 5 (VFP load/store): base reg plus a packed AM5
  // opcode word with a word-scaled offset.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: INT32_MIN is the parser's sentinel for it.
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2473 
  // FP16 variant of addressing mode 5: offset is halfword-scaled
  // (divide by 2) instead of word-scaled.
  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // The lower bit is always zero and as such is not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: INT32_MIN is the parser's sentinel for it.
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2495 
  // Emit (base-register, signed byte offset) for an imm8s4 memory operand.
  // Note: unlike the AM5 forms above, the offset is passed through unscaled.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2511 
  // Emit (base-register, word-scaled offset) for a [0,1020] step-4 operand.
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2519 
  // Emit (base-register, raw signed offset) for an imm8 memory operand.
  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Positive-offset variant: encoding is identical; the range restriction is
  // enforced by the operand predicate, not here.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  // Negative-offset variant: same emission as the generic imm8 form.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
2534 
  // Emit operands for an unsigned imm12 offset; a bare immediate is treated
  // as a label reference (expression + zero offset placeholder for a fixup).
  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2549 
  // Emit operands for a signed imm12 offset.  Same emission strategy as the
  // unsigned form; the sign/range checks live in the operand predicates.
  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2564 
2565   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2566     assert(N == 1 && "Invalid number of operands!");
2567     // This is container for the immediate that we will create the constant
2568     // pool from
2569     addExpr(Inst, getConstantPoolImm());
2570     return;
2571   }
2572 
  // Emit (base, index) register pair for a TBB table-branch operand.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  // Emit (base, index) register pair for a TBH table-branch operand; the
  // "lsl #1" on the index is implicit in the instruction encoding.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
2584 
  // Emit (base, offset-reg, AM2-packed shift/sign) for a register-offset
  // ARM-mode memory operand.
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // Fold sign, shift amount and shift type into the single AM2 immediate.
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2594 
  // Emit (base, offset-reg, shift amount) for a Thumb2 register-offset
  // operand; Thumb2 only allows a plain LSL amount, so no AM2 packing.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
  }

  // Emit (base, offset-reg) for a Thumb1 reg+reg memory operand.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }
2607 
  // Thumb reg+imm forms: the immediate operand is the byte offset divided by
  // the access size (4/2/1), matching the scaled encodings.
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // SP-relative Thumb form: word-scaled like the RIs4 variant.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2635 
  // Emit a post-indexed imm8 operand: magnitude in bits [7:0], add/sub flag
  // in bit 8 (1 = add).
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    // Compute the sign before clamping so INT32_MIN (i.e. #-0 after the
    // clamp) still encodes as a subtract.
    bool isAdd = Imm >= 0;
    // INT32_MIN cannot be negated; treat it as zero magnitude.
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2646 
  // Emit a post-indexed imm8s4 operand: word-scaled magnitude in bits [7:0],
  // add/sub flag in bit 8 (1 = add).
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    // Sign captured before the INT32_MIN clamp, as in the imm8 form above.
    bool isAdd = Imm >= 0;
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2658 
  // Emit (register, add/sub flag) for a plain post-indexed register operand.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
  }

  // Emit (register, AM2-packed sign/shift) for a shifted post-indexed
  // register operand.
  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2675 
  // Emit the MSR mask value as a single immediate operand.
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
  }

  // Emit the banked-register encoding as a single immediate operand.
  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
  }

  // Emit the CPS interrupt-flags (a/i/f) bitmask as an immediate operand.
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
  }
2690 
  // Emit a NEON vector list as its (super-)register operand.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
  }

  // Emit a lane-indexed vector list: the register plus the lane index.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
  }
2701 
  // Vector lane-index operands for each element size.  Emission is identical
  // for all sizes; the per-size range check lives in the operand predicates.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }
2721 
  // Emit the combined cmode/value immediate for an i8 splat (cmode 0b1110).
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    // NOTE(review): dyn_cast result is dereferenced without a null check —
    // presumably the matcher guarantees a constant here; confirm.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
  }
2729 
  // Emit the cmode/value immediate for an i16 splat.
  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Bitwise-NOT variant: encode the complement (VMVN-style materialization).
  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
    Inst.addOperand(MCOperand::createImm(Value));
  }
2747 
  // Emit the cmode/value immediate for an i32 splat.
  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Bitwise-NOT variant of the i32 splat (VMVN-style materialization).
  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(~Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }
2765 
  // Shared emitter for byte-replicate immediates: optionally invert, take
  // the low byte, and tag it with cmode 0b1110.  Only reachable for the
  // VMOVv8i8/VMOVv16i8 opcodes (see the assert).
  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
          "All instructions that wants to replicate non-zero byte "
          "always must be replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    if (Inv)
      Value = ~Value;
    unsigned B = Value & 0xff;
    B |= 0xe00; // cmode = 0b1110
    Inst.addOperand(MCOperand::createImm(B));
  }

  // Inverted-byte replicate (VMVN spelled as a VMOV of the complement).
  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }
2785 
2786   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
2787     if (Value >= 256 && Value <= 0xffff)
2788       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2789     else if (Value > 0xffff && Value <= 0xffffff)
2790       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2791     else if (Value > 0xffffff)
2792       Value = (Value >> 24) | 0x600;
2793     return Value;
2794   }
2795 
  // Emit the cmode/value immediate for a 32-bit VMOV immediate.
  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Non-inverted byte replicate; shares the emitter with the inverted form.
  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }
2808 
  // Emit the cmode/value immediate for a replicated 16-bit element: the low
  // half-word is reduced to a payload byte plus a byte-position flag.
  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
            Inst.getOpcode() == ARM::VMOVv8i16 ||
            Inst.getOpcode() == ARM::VMVNv4i16 ||
            Inst.getOpcode() == ARM::VMVNv8i16) &&
          "All instructions that want to replicate non-zero half-word "
          "always must be replaced with V{MOV,MVN}v{4,8}i16.");
    uint64_t Value = CE->getValue();
    unsigned Elem = Value & 0xffff;
    // Payload byte in the high half selects the shifted encoding (0x200).
    if (Elem >= 256)
      Elem = (Elem >> 8) | 0x200;
    Inst.addOperand(MCOperand::createImm(Elem));
  }
2824 
  // Emit the cmode/value immediate for the complemented 32-bit constant
  // (used when the value is representable via VMVN).
  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Emit the cmode/value immediate for a replicated 32-bit element.
  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
            Inst.getOpcode() == ARM::VMOVv4i32 ||
            Inst.getOpcode() == ARM::VMVNv2i32 ||
            Inst.getOpcode() == ARM::VMVNv4i32) &&
          "All instructions that want to replicate non-zero word "
          "always must be replaced with V{MOV,MVN}v{2,4}i32.");
    uint64_t Value = CE->getValue();
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    Inst.addOperand(MCOperand::createImm(Elem));
  }
2846 
  // Emit the cmode/value immediate for an i64 splat: each of the 8 result
  // bits records whether the corresponding byte of the constant is all-ones
  // (only the low bit of each byte is sampled; the operand predicate is
  // presumed to guarantee bytes are 0x00 or 0xff — confirm).
  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    // cmode = 0b1110 with op bit set selects the i64 byte-mask encoding.
    Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
  }
2858 
  // Map an even complex rotation (0/90/180/270) to its 2-bit encoding.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
  }

  // Map an odd complex rotation (90/270) to its 1-bit encoding.
  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
  }
2870 
2871   void print(raw_ostream &OS) const override;
2872 
  // Factory helpers: each allocates an ARMOperand of the given kind, fills
  // in its payload union member, and records the source range.  Operands
  // created from a single token use the same location for start and end.
  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
                                                    SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
                                                        SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Token operands alias the lexer's buffer; Str must outlive the operand.
  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
                                               SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2940 
  // Factory for a register shifted by another register (e.g. "r0, lsl r1").
  static std::unique_ptr<ARMOperand>
  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
                        SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a register shifted by an immediate (e.g. "r0, lsl #2").
  static std::unique_ptr<ARMOperand>
  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                         unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a bare shifter immediate ("lsl #n" / "asr #n").
  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
                                                      SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a rotate immediate (SXTB/UXTB-style "ror #n").
  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
                                                  SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for an ARM modified immediate (8-bit value + even rotation).
  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
                                                  SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
    Op->ModImm.Bits = Bits;
    Op->ModImm.Rot = Rot;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2995 
  // Factory for a constant-pool immediate ("ldr rN, =expr" pseudo operand).
  static std::unique_ptr<ARMOperand>
  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a bitfield descriptor (BFC/BFI lsb+width pair).
  static std::unique_ptr<ARMOperand>
  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
3014 
  // Factory for a register list.  The list kind (GPR/DPR/SPR) is chosen from
  // the class of the first register; Regs is sorted in place by its first
  // element (per the comment below, the register encoding) before the
  // register numbers (second element) are copied into the operand.
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().second))
      Kind = k_SPRRegisterList;

    // Sort based on the register encoding values.
    array_pod_sort(Regs.begin(), Regs.end());

    auto Op = make_unique<ARMOperand>(Kind);
    for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->second);
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
3038 
  // Factory for a plain NEON vector list ({d0, d1, ...}).
  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
                                                      unsigned Count,
                                                      bool isDoubleSpaced,
                                                      SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for an all-lanes vector list ({d0[], d1[], ...}).
  static std::unique_ptr<ARMOperand>
  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
                           SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a lane-indexed vector list ({d0[1], d1[1], ...}).
  static std::unique_ptr<ARMOperand>
  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
                          bool isDoubleSpaced, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
3076 
  // Factory for a vector lane index ("[n]").  Ctx is currently unused;
  // presumably kept for call-site uniformity with other targets — confirm
  // before removing.
  static std::unique_ptr<ARMOperand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<ARMOperand>(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Factory for a generic immediate expression operand.
  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
                                               SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
3094 
  // Factory for a memory operand.  OffsetImm/OffsetRegNum are mutually
  // optional; Alignment of 0 means unspecified; AlignmentLoc is only
  // meaningful when an alignment was parsed (used for diagnostics).
  static std::unique_ptr<ARMOperand>
  CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
            unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
            unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
            SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
    auto Op = make_unique<ARMOperand>(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    Op->AlignmentLoc = AlignmentLoc;
    return Op;
  }

  // Factory for a post-indexed register (with optional shift) operand.
  static std::unique_ptr<ARMOperand>
  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
                   unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
3126 
  // Factories for single-token system operands (barrier options, iflags,
  // MSR masks, banked registers); start and end locations coincide.
  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
                                                         SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
    Op->ISBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
                                                      SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_BankedReg);
    Op->BankedReg.Val = Reg;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
3169 };
3170 
3171 } // end anonymous namespace.
3172 
3173 void ARMOperand::print(raw_ostream &OS) const {
3174   switch (Kind) {
3175   case k_CondCode:
3176     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3177     break;
3178   case k_CCOut:
3179     OS << "<ccout " << getReg() << ">";
3180     break;
3181   case k_ITCondMask: {
3182     static const char *const MaskStr[] = {
3183       "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
3184       "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
3185     };
3186     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3187     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3188     break;
3189   }
3190   case k_CoprocNum:
3191     OS << "<coprocessor number: " << getCoproc() << ">";
3192     break;
3193   case k_CoprocReg:
3194     OS << "<coprocessor register: " << getCoproc() << ">";
3195     break;
3196   case k_CoprocOption:
3197     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3198     break;
3199   case k_MSRMask:
3200     OS << "<mask: " << getMSRMask() << ">";
3201     break;
3202   case k_BankedReg:
3203     OS << "<banked reg: " << getBankedReg() << ">";
3204     break;
3205   case k_Immediate:
3206     OS << *getImm();
3207     break;
3208   case k_MemBarrierOpt:
3209     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3210     break;
3211   case k_InstSyncBarrierOpt:
3212     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3213     break;
3214   case k_Memory:
3215     OS << "<memory "
3216        << " base:" << Memory.BaseRegNum;
3217     OS << ">";
3218     break;
3219   case k_PostIndexRegister:
3220     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3221        << PostIdxReg.RegNum;
3222     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3223       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3224          << PostIdxReg.ShiftImm;
3225     OS << ">";
3226     break;
3227   case k_ProcIFlags: {
3228     OS << "<ARM_PROC::";
3229     unsigned IFlags = getProcIFlags();
3230     for (int i=2; i >= 0; --i)
3231       if (IFlags & (1 << i))
3232         OS << ARM_PROC::IFlagsToString(1 << i);
3233     OS << ">";
3234     break;
3235   }
3236   case k_Register:
3237     OS << "<register " << getReg() << ">";
3238     break;
3239   case k_ShifterImmediate:
3240     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3241        << " #" << ShifterImm.Imm << ">";
3242     break;
3243   case k_ShiftedRegister:
3244     OS << "<so_reg_reg "
3245        << RegShiftedReg.SrcReg << " "
3246        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
3247        << " " << RegShiftedReg.ShiftReg << ">";
3248     break;
3249   case k_ShiftedImmediate:
3250     OS << "<so_reg_imm "
3251        << RegShiftedImm.SrcReg << " "
3252        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
3253        << " #" << RegShiftedImm.ShiftImm << ">";
3254     break;
3255   case k_RotateImmediate:
3256     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3257     break;
3258   case k_ModifiedImmediate:
3259     OS << "<mod_imm #" << ModImm.Bits << ", #"
3260        <<  ModImm.Rot << ")>";
3261     break;
3262   case k_ConstantPoolImmediate:
3263     OS << "<constant_pool_imm #" << *getConstantPoolImm();
3264     break;
3265   case k_BitfieldDescriptor:
3266     OS << "<bitfield " << "lsb: " << Bitfield.LSB
3267        << ", width: " << Bitfield.Width << ">";
3268     break;
3269   case k_RegisterList:
3270   case k_DPRRegisterList:
3271   case k_SPRRegisterList: {
3272     OS << "<register_list ";
3273 
3274     const SmallVectorImpl<unsigned> &RegList = getRegList();
3275     for (SmallVectorImpl<unsigned>::const_iterator
3276            I = RegList.begin(), E = RegList.end(); I != E; ) {
3277       OS << *I;
3278       if (++I < E) OS << ", ";
3279     }
3280 
3281     OS << ">";
3282     break;
3283   }
3284   case k_VectorList:
3285     OS << "<vector_list " << VectorList.Count << " * "
3286        << VectorList.RegNum << ">";
3287     break;
3288   case k_VectorListAllLanes:
3289     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3290        << VectorList.RegNum << ">";
3291     break;
3292   case k_VectorListIndexed:
3293     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3294        << VectorList.Count << " * " << VectorList.RegNum << ">";
3295     break;
3296   case k_Token:
3297     OS << "'" << getToken() << "'";
3298     break;
3299   case k_VectorIndex:
3300     OS << "<vectorindex " << getVectorIndex() << ">";
3301     break;
3302   }
3303 }
3304 
3305 /// @name Auto-generated Match Functions
3306 /// {
3307 
3308 static unsigned MatchRegisterName(StringRef Name);
3309 
3310 /// }
3311 
3312 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3313                                  SMLoc &StartLoc, SMLoc &EndLoc) {
3314   const AsmToken &Tok = getParser().getTok();
3315   StartLoc = Tok.getLoc();
3316   EndLoc = Tok.getEndLoc();
3317   RegNo = tryParseRegister();
3318 
3319   return (RegNo == (unsigned)-1);
3320 }
3321 
3322 /// Try to parse a register name.  The token must be an Identifier when called,
3323 /// and if it is a register name the token is eaten and the register number is
3324 /// returned.  Otherwise return -1.
3325 int ARMAsmParser::tryParseRegister() {
3326   MCAsmParser &Parser = getParser();
3327   const AsmToken &Tok = Parser.getTok();
3328   if (Tok.isNot(AsmToken::Identifier)) return -1;
3329 
3330   std::string lowerCase = Tok.getString().lower();
3331   unsigned RegNum = MatchRegisterName(lowerCase);
3332   if (!RegNum) {
3333     RegNum = StringSwitch<unsigned>(lowerCase)
3334       .Case("r13", ARM::SP)
3335       .Case("r14", ARM::LR)
3336       .Case("r15", ARM::PC)
3337       .Case("ip", ARM::R12)
3338       // Additional register name aliases for 'gas' compatibility.
3339       .Case("a1", ARM::R0)
3340       .Case("a2", ARM::R1)
3341       .Case("a3", ARM::R2)
3342       .Case("a4", ARM::R3)
3343       .Case("v1", ARM::R4)
3344       .Case("v2", ARM::R5)
3345       .Case("v3", ARM::R6)
3346       .Case("v4", ARM::R7)
3347       .Case("v5", ARM::R8)
3348       .Case("v6", ARM::R9)
3349       .Case("v7", ARM::R10)
3350       .Case("v8", ARM::R11)
3351       .Case("sb", ARM::R9)
3352       .Case("sl", ARM::R10)
3353       .Case("fp", ARM::R11)
3354       .Default(0);
3355   }
3356   if (!RegNum) {
3357     // Check for aliases registered via .req. Canonicalize to lower case.
3358     // That's more consistent since register names are case insensitive, and
3359     // it's how the original entry was passed in from MC/MCParser/AsmParser.
3360     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3361     // If no match, return failure.
3362     if (Entry == RegisterReqs.end())
3363       return -1;
3364     Parser.Lex(); // Eat identifier token.
3365     return Entry->getValue();
3366   }
3367 
3368   // Some FPUs only have 16 D registers, so D16-D31 are invalid
3369   if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3370     return -1;
3371 
3372   Parser.Lex(); // Eat identifier token.
3373 
3374   return RegNum;
3375 }
3376 
// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
//
// On success the previously-pushed register operand is popped and replaced
// with a combined shifted-register (or shifted-immediate) operand.
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Shift mnemonics are case insensitive; "asl" is accepted as an alias
  // of "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing consumed yet.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this path yields 1
    // ("recoverable") even though the shift token has already been eaten and
    // the previous operand popped — confirm -1 was not intended here.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted form when a shift register was parsed (RRX is encoded
  // as a shifted immediate even though ShiftReg was set above).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}
3477 
3478 /// Try to parse a register name.  The token must be an Identifier when called.
3479 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3480 /// if there is a "writeback". 'true' if it's not a register.
3481 ///
3482 /// TODO this is likely to change to allow different register types and or to
3483 /// parse for a specific register type.
3484 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3485   MCAsmParser &Parser = getParser();
3486   SMLoc RegStartLoc = Parser.getTok().getLoc();
3487   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3488   int RegNo = tryParseRegister();
3489   if (RegNo == -1)
3490     return true;
3491 
3492   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3493 
3494   const AsmToken &ExclaimTok = Parser.getTok();
3495   if (ExclaimTok.is(AsmToken::Exclaim)) {
3496     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3497                                                ExclaimTok.getLoc()));
3498     Parser.Lex(); // Eat exclaim token
3499     return false;
3500   }
3501 
3502   // Also check for an index operand. This is only legal for vector registers,
3503   // but that'll get caught OK in operand matching, so we don't need to
3504   // explicitly filter everything else out here.
3505   if (Parser.getTok().is(AsmToken::LBrac)) {
3506     SMLoc SIdx = Parser.getTok().getLoc();
3507     Parser.Lex(); // Eat left bracket token.
3508 
3509     const MCExpr *ImmVal;
3510     if (getParser().parseExpression(ImmVal))
3511       return true;
3512     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3513     if (!MCE)
3514       return TokError("immediate value expected for vector index");
3515 
3516     if (Parser.getTok().isNot(AsmToken::RBrac))
3517       return Error(Parser.getTok().getLoc(), "']' expected");
3518 
3519     SMLoc E = Parser.getTok().getEndLoc();
3520     Parser.Lex(); // Eat right bracket token.
3521 
3522     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3523                                                      SIdx, E,
3524                                                      getContext()));
3525   }
3526 
3527   return false;
3528 }
3529 
3530 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
3531 /// instruction with a symbolic operand name.
3532 /// We accept "crN" syntax for GAS compatibility.
3533 /// <operand-name> ::= <prefix><number>
3534 /// If CoprocOp is 'c', then:
3535 ///   <prefix> ::= c | cr
3536 /// If CoprocOp is 'p', then :
3537 ///   <prefix> ::= p
3538 /// <number> ::= integer in range [0, 15]
3539 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3540   // Use the same layout as the tablegen'erated register name matcher. Ugly,
3541   // but efficient.
3542   if (Name.size() < 2 || Name[0] != CoprocOp)
3543     return -1;
3544   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3545 
3546   switch (Name.size()) {
3547   default: return -1;
3548   case 1:
3549     switch (Name[0]) {
3550     default:  return -1;
3551     case '0': return 0;
3552     case '1': return 1;
3553     case '2': return 2;
3554     case '3': return 3;
3555     case '4': return 4;
3556     case '5': return 5;
3557     case '6': return 6;
3558     case '7': return 7;
3559     case '8': return 8;
3560     case '9': return 9;
3561     }
3562   case 2:
3563     if (Name[0] != '1')
3564       return -1;
3565     switch (Name[1]) {
3566     default:  return -1;
3567     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3568     // However, old cores (v5/v6) did use them in that way.
3569     case '0': return 10;
3570     case '1': return 11;
3571     case '2': return 12;
3572     case '3': return 13;
3573     case '4': return 14;
3574     case '5': return 15;
3575     }
3576   }
3577 }
3578 
3579 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3580 OperandMatchResultTy
3581 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3582   MCAsmParser &Parser = getParser();
3583   SMLoc S = Parser.getTok().getLoc();
3584   const AsmToken &Tok = Parser.getTok();
3585   if (!Tok.is(AsmToken::Identifier))
3586     return MatchOperand_NoMatch;
3587   unsigned CC = ARMCondCodeFromString(Tok.getString());
3588   if (CC == ~0U)
3589     return MatchOperand_NoMatch;
3590   Parser.Lex(); // Eat the token.
3591 
3592   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3593 
3594   return MatchOperand_Success;
3595 }
3596 
3597 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
3598 /// token must be an Identifier when called, and if it is a coprocessor
3599 /// number, the token is eaten and the operand is added to the operand list.
3600 OperandMatchResultTy
3601 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3602   MCAsmParser &Parser = getParser();
3603   SMLoc S = Parser.getTok().getLoc();
3604   const AsmToken &Tok = Parser.getTok();
3605   if (Tok.isNot(AsmToken::Identifier))
3606     return MatchOperand_NoMatch;
3607 
3608   int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3609   if (Num == -1)
3610     return MatchOperand_NoMatch;
3611   // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3612   if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3613     return MatchOperand_NoMatch;
3614 
3615   Parser.Lex(); // Eat identifier token.
3616   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3617   return MatchOperand_Success;
3618 }
3619 
3620 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
3621 /// token must be an Identifier when called, and if it is a coprocessor
3622 /// number, the token is eaten and the operand is added to the operand list.
3623 OperandMatchResultTy
3624 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3625   MCAsmParser &Parser = getParser();
3626   SMLoc S = Parser.getTok().getLoc();
3627   const AsmToken &Tok = Parser.getTok();
3628   if (Tok.isNot(AsmToken::Identifier))
3629     return MatchOperand_NoMatch;
3630 
3631   int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3632   if (Reg == -1)
3633     return MatchOperand_NoMatch;
3634 
3635   Parser.Lex(); // Eat identifier token.
3636   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3637   return MatchOperand_Success;
3638 }
3639 
3640 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
3641 /// coproc_option : '{' imm0_255 '}'
3642 OperandMatchResultTy
3643 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3644   MCAsmParser &Parser = getParser();
3645   SMLoc S = Parser.getTok().getLoc();
3646 
3647   // If this isn't a '{', this isn't a coprocessor immediate operand.
3648   if (Parser.getTok().isNot(AsmToken::LCurly))
3649     return MatchOperand_NoMatch;
3650   Parser.Lex(); // Eat the '{'
3651 
3652   const MCExpr *Expr;
3653   SMLoc Loc = Parser.getTok().getLoc();
3654   if (getParser().parseExpression(Expr)) {
3655     Error(Loc, "illegal expression");
3656     return MatchOperand_ParseFail;
3657   }
3658   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3659   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3660     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3661     return MatchOperand_ParseFail;
3662   }
3663   int Val = CE->getValue();
3664 
3665   // Check for and consume the closing '}'
3666   if (Parser.getTok().isNot(AsmToken::RCurly))
3667     return MatchOperand_ParseFail;
3668   SMLoc E = Parser.getTok().getEndLoc();
3669   Parser.Lex(); // Eat the '}'
3670 
3671   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3672   return MatchOperand_Success;
3673 }
3674 
3675 // For register list parsing, we need to map from raw GPR register numbering
3676 // to the enumeration values. The enumeration values aren't sorted by
3677 // register number due to our using "sp", "lr" and "pc" as canonical names.
3678 static unsigned getNextRegister(unsigned Reg) {
3679   // If this is a GPR, we need to do it manually, otherwise we can rely
3680   // on the sort ordering of the enumeration since the other reg-classes
3681   // are sane.
3682   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3683     return Reg + 1;
3684   switch(Reg) {
3685   default: llvm_unreachable("Invalid GPR number!");
3686   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3687   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3688   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3689   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3690   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3691   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3692   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3693   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3694   }
3695 }
3696 
/// Parse a register list.
/// reglist ::= '{' reg (',' reg | '-' reg)* '}' ['^']
/// The register class (GPR, DPR or SPR) is fixed by the first register; Q
/// registers are accepted and expanded to their two D sub-registers. On
/// success a single register-list operand is pushed, optionally followed by
/// a '^' token operand.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return TokError("Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  int EReg = 0;
  // Pairs of (encoding value, register number).
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    ++Reg;
  }
  // Determine the register class of the list from the (possibly rewritten)
  // first register; everything that follows must belong to the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: "<reg>-<reg>".
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    // An out-of-order GPR list is only a warning; for other classes it is
    // a hard error.
    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else
        return Error(RegLoc, "register list not in ascending order");
    }
    // A duplicate register is warned about and then skipped.
    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    // Push the second D sub-register of a Q register.
    if (isQReg) {
      EReg = MRI->getEncodingValue(++Reg);
      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
3827 
3828 // Helper function to parse the lane index for vector lists.
3829 OperandMatchResultTy ARMAsmParser::
3830 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3831   MCAsmParser &Parser = getParser();
3832   Index = 0; // Always return a defined index value.
3833   if (Parser.getTok().is(AsmToken::LBrac)) {
3834     Parser.Lex(); // Eat the '['.
3835     if (Parser.getTok().is(AsmToken::RBrac)) {
3836       // "Dn[]" is the 'all lanes' syntax.
3837       LaneKind = AllLanes;
3838       EndLoc = Parser.getTok().getEndLoc();
3839       Parser.Lex(); // Eat the ']'.
3840       return MatchOperand_Success;
3841     }
3842 
3843     // There's an optional '#' token here. Normally there wouldn't be, but
3844     // inline assemble puts one in, and it's friendly to accept that.
3845     if (Parser.getTok().is(AsmToken::Hash))
3846       Parser.Lex(); // Eat '#' or '$'.
3847 
3848     const MCExpr *LaneIndex;
3849     SMLoc Loc = Parser.getTok().getLoc();
3850     if (getParser().parseExpression(LaneIndex)) {
3851       Error(Loc, "illegal expression");
3852       return MatchOperand_ParseFail;
3853     }
3854     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3855     if (!CE) {
3856       Error(Loc, "lane index must be empty or an integer");
3857       return MatchOperand_ParseFail;
3858     }
3859     if (Parser.getTok().isNot(AsmToken::RBrac)) {
3860       Error(Parser.getTok().getLoc(), "']' expected");
3861       return MatchOperand_ParseFail;
3862     }
3863     EndLoc = Parser.getTok().getEndLoc();
3864     Parser.Lex(); // Eat the ']'.
3865     int64_t Val = CE->getValue();
3866 
3867     // FIXME: Make this range check context sensitive for .8, .16, .32.
3868     if (Val < 0 || Val > 7) {
3869       Error(Parser.getTok().getLoc(), "lane index out of range");
3870       return MatchOperand_ParseFail;
3871     }
3872     Index = Val;
3873     LaneKind = IndexedLane;
3874     return MatchOperand_Success;
3875   }
3876   LaneKind = NoLanes;
3877   return MatchOperand_Success;
3878 }
3879 
3880 // parse a vector register list
3881 OperandMatchResultTy
3882 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3883   MCAsmParser &Parser = getParser();
3884   VectorLaneTy LaneKind;
3885   unsigned LaneIndex;
3886   SMLoc S = Parser.getTok().getLoc();
3887   // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
3889   // respectively.
3890   if (Parser.getTok().is(AsmToken::Identifier)) {
3891     SMLoc E = Parser.getTok().getEndLoc();
3892     int Reg = tryParseRegister();
3893     if (Reg == -1)
3894       return MatchOperand_NoMatch;
3895     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3896       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3897       if (Res != MatchOperand_Success)
3898         return Res;
3899       switch (LaneKind) {
3900       case NoLanes:
3901         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3902         break;
3903       case AllLanes:
3904         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3905                                                                 S, E));
3906         break;
3907       case IndexedLane:
3908         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3909                                                                LaneIndex,
3910                                                                false, S, E));
3911         break;
3912       }
3913       return MatchOperand_Success;
3914     }
3915     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3916       Reg = getDRegFromQReg(Reg);
3917       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3918       if (Res != MatchOperand_Success)
3919         return Res;
3920       switch (LaneKind) {
3921       case NoLanes:
3922         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3923                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3924         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3925         break;
3926       case AllLanes:
3927         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3928                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3929         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3930                                                                 S, E));
3931         break;
3932       case IndexedLane:
3933         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3934                                                                LaneIndex,
3935                                                                false, S, E));
3936         break;
3937       }
3938       return MatchOperand_Success;
3939     }
3940     Error(S, "vector register expected");
3941     return MatchOperand_ParseFail;
3942   }
3943 
3944   if (Parser.getTok().isNot(AsmToken::LCurly))
3945     return MatchOperand_NoMatch;
3946 
3947   Parser.Lex(); // Eat '{' token.
3948   SMLoc RegLoc = Parser.getTok().getLoc();
3949 
3950   int Reg = tryParseRegister();
3951   if (Reg == -1) {
3952     Error(RegLoc, "register expected");
3953     return MatchOperand_ParseFail;
3954   }
3955   unsigned Count = 1;
3956   int Spacing = 0;
3957   unsigned FirstReg = Reg;
3958   // The list is of D registers, but we also allow Q regs and just interpret
3959   // them as the two D sub-registers.
3960   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3961     FirstReg = Reg = getDRegFromQReg(Reg);
3962     Spacing = 1; // double-spacing requires explicit D registers, otherwise
3963                  // it's ambiguous with four-register single spaced.
3964     ++Reg;
3965     ++Count;
3966   }
3967 
3968   SMLoc E;
3969   if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3970     return MatchOperand_ParseFail;
3971 
3972   while (Parser.getTok().is(AsmToken::Comma) ||
3973          Parser.getTok().is(AsmToken::Minus)) {
3974     if (Parser.getTok().is(AsmToken::Minus)) {
3975       if (!Spacing)
3976         Spacing = 1; // Register range implies a single spaced list.
3977       else if (Spacing == 2) {
3978         Error(Parser.getTok().getLoc(),
3979               "sequential registers in double spaced list");
3980         return MatchOperand_ParseFail;
3981       }
3982       Parser.Lex(); // Eat the minus.
3983       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3984       int EndReg = tryParseRegister();
3985       if (EndReg == -1) {
3986         Error(AfterMinusLoc, "register expected");
3987         return MatchOperand_ParseFail;
3988       }
3989       // Allow Q regs and just interpret them as the two D sub-registers.
3990       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3991         EndReg = getDRegFromQReg(EndReg) + 1;
3992       // If the register is the same as the start reg, there's nothing
3993       // more to do.
3994       if (Reg == EndReg)
3995         continue;
3996       // The register must be in the same register class as the first.
3997       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3998         Error(AfterMinusLoc, "invalid register in register list");
3999         return MatchOperand_ParseFail;
4000       }
4001       // Ranges must go from low to high.
4002       if (Reg > EndReg) {
4003         Error(AfterMinusLoc, "bad range in register list");
4004         return MatchOperand_ParseFail;
4005       }
4006       // Parse the lane specifier if present.
4007       VectorLaneTy NextLaneKind;
4008       unsigned NextLaneIndex;
4009       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4010           MatchOperand_Success)
4011         return MatchOperand_ParseFail;
4012       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4013         Error(AfterMinusLoc, "mismatched lane index in register list");
4014         return MatchOperand_ParseFail;
4015       }
4016 
4017       // Add all the registers in the range to the register list.
4018       Count += EndReg - Reg;
4019       Reg = EndReg;
4020       continue;
4021     }
4022     Parser.Lex(); // Eat the comma.
4023     RegLoc = Parser.getTok().getLoc();
4024     int OldReg = Reg;
4025     Reg = tryParseRegister();
4026     if (Reg == -1) {
4027       Error(RegLoc, "register expected");
4028       return MatchOperand_ParseFail;
4029     }
4030     // vector register lists must be contiguous.
4031     // It's OK to use the enumeration values directly here rather, as the
4032     // VFP register classes have the enum sorted properly.
4033     //
4034     // The list is of D registers, but we also allow Q regs and just interpret
4035     // them as the two D sub-registers.
4036     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4037       if (!Spacing)
4038         Spacing = 1; // Register range implies a single spaced list.
4039       else if (Spacing == 2) {
4040         Error(RegLoc,
4041               "invalid register in double-spaced list (must be 'D' register')");
4042         return MatchOperand_ParseFail;
4043       }
4044       Reg = getDRegFromQReg(Reg);
4045       if (Reg != OldReg + 1) {
4046         Error(RegLoc, "non-contiguous register range");
4047         return MatchOperand_ParseFail;
4048       }
4049       ++Reg;
4050       Count += 2;
4051       // Parse the lane specifier if present.
4052       VectorLaneTy NextLaneKind;
4053       unsigned NextLaneIndex;
4054       SMLoc LaneLoc = Parser.getTok().getLoc();
4055       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4056           MatchOperand_Success)
4057         return MatchOperand_ParseFail;
4058       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4059         Error(LaneLoc, "mismatched lane index in register list");
4060         return MatchOperand_ParseFail;
4061       }
4062       continue;
4063     }
4064     // Normal D register.
4065     // Figure out the register spacing (single or double) of the list if
4066     // we don't know it already.
4067     if (!Spacing)
4068       Spacing = 1 + (Reg == OldReg + 2);
4069 
4070     // Just check that it's contiguous and keep going.
4071     if (Reg != OldReg + Spacing) {
4072       Error(RegLoc, "non-contiguous register range");
4073       return MatchOperand_ParseFail;
4074     }
4075     ++Count;
4076     // Parse the lane specifier if present.
4077     VectorLaneTy NextLaneKind;
4078     unsigned NextLaneIndex;
4079     SMLoc EndLoc = Parser.getTok().getLoc();
4080     if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4081       return MatchOperand_ParseFail;
4082     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4083       Error(EndLoc, "mismatched lane index in register list");
4084       return MatchOperand_ParseFail;
4085     }
4086   }
4087 
4088   if (Parser.getTok().isNot(AsmToken::RCurly)) {
4089     Error(Parser.getTok().getLoc(), "'}' expected");
4090     return MatchOperand_ParseFail;
4091   }
4092   E = Parser.getTok().getEndLoc();
4093   Parser.Lex(); // Eat '}' token.
4094 
4095   switch (LaneKind) {
4096   case NoLanes:
4097     // Two-register operands have been converted to the
4098     // composite register classes.
4099     if (Count == 2) {
4100       const MCRegisterClass *RC = (Spacing == 1) ?
4101         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4102         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4103       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4104     }
4105     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
4106                                                     (Spacing == 2), S, E));
4107     break;
4108   case AllLanes:
4109     // Two-register operands have been converted to the
4110     // composite register classes.
4111     if (Count == 2) {
4112       const MCRegisterClass *RC = (Spacing == 1) ?
4113         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4114         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4115       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4116     }
4117     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
4118                                                             (Spacing == 2),
4119                                                             S, E));
4120     break;
4121   case IndexedLane:
4122     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4123                                                            LaneIndex,
4124                                                            (Spacing == 2),
4125                                                            S, E));
4126     break;
4127   }
4128   return MatchOperand_Success;
4129 }
4130 
4131 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4132 OperandMatchResultTy
4133 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4134   MCAsmParser &Parser = getParser();
4135   SMLoc S = Parser.getTok().getLoc();
4136   const AsmToken &Tok = Parser.getTok();
4137   unsigned Opt;
4138 
4139   if (Tok.is(AsmToken::Identifier)) {
4140     StringRef OptStr = Tok.getString();
4141 
4142     Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4143       .Case("sy",    ARM_MB::SY)
4144       .Case("st",    ARM_MB::ST)
4145       .Case("ld",    ARM_MB::LD)
4146       .Case("sh",    ARM_MB::ISH)
4147       .Case("ish",   ARM_MB::ISH)
4148       .Case("shst",  ARM_MB::ISHST)
4149       .Case("ishst", ARM_MB::ISHST)
4150       .Case("ishld", ARM_MB::ISHLD)
4151       .Case("nsh",   ARM_MB::NSH)
4152       .Case("un",    ARM_MB::NSH)
4153       .Case("nshst", ARM_MB::NSHST)
4154       .Case("nshld", ARM_MB::NSHLD)
4155       .Case("unst",  ARM_MB::NSHST)
4156       .Case("osh",   ARM_MB::OSH)
4157       .Case("oshst", ARM_MB::OSHST)
4158       .Case("oshld", ARM_MB::OSHLD)
4159       .Default(~0U);
4160 
4161     // ishld, oshld, nshld and ld are only available from ARMv8.
4162     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4163                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4164       Opt = ~0U;
4165 
4166     if (Opt == ~0U)
4167       return MatchOperand_NoMatch;
4168 
4169     Parser.Lex(); // Eat identifier token.
4170   } else if (Tok.is(AsmToken::Hash) ||
4171              Tok.is(AsmToken::Dollar) ||
4172              Tok.is(AsmToken::Integer)) {
4173     if (Parser.getTok().isNot(AsmToken::Integer))
4174       Parser.Lex(); // Eat '#' or '$'.
4175     SMLoc Loc = Parser.getTok().getLoc();
4176 
4177     const MCExpr *MemBarrierID;
4178     if (getParser().parseExpression(MemBarrierID)) {
4179       Error(Loc, "illegal expression");
4180       return MatchOperand_ParseFail;
4181     }
4182 
4183     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4184     if (!CE) {
4185       Error(Loc, "constant expression expected");
4186       return MatchOperand_ParseFail;
4187     }
4188 
4189     int Val = CE->getValue();
4190     if (Val & ~0xf) {
4191       Error(Loc, "immediate value out of range");
4192       return MatchOperand_ParseFail;
4193     }
4194 
4195     Opt = ARM_MB::RESERVED_0 + Val;
4196   } else
4197     return MatchOperand_ParseFail;
4198 
4199   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4200   return MatchOperand_Success;
4201 }
4202 
4203 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4204 OperandMatchResultTy
4205 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4206   MCAsmParser &Parser = getParser();
4207   SMLoc S = Parser.getTok().getLoc();
4208   const AsmToken &Tok = Parser.getTok();
4209   unsigned Opt;
4210 
4211   if (Tok.is(AsmToken::Identifier)) {
4212     StringRef OptStr = Tok.getString();
4213 
4214     if (OptStr.equals_lower("sy"))
4215       Opt = ARM_ISB::SY;
4216     else
4217       return MatchOperand_NoMatch;
4218 
4219     Parser.Lex(); // Eat identifier token.
4220   } else if (Tok.is(AsmToken::Hash) ||
4221              Tok.is(AsmToken::Dollar) ||
4222              Tok.is(AsmToken::Integer)) {
4223     if (Parser.getTok().isNot(AsmToken::Integer))
4224       Parser.Lex(); // Eat '#' or '$'.
4225     SMLoc Loc = Parser.getTok().getLoc();
4226 
4227     const MCExpr *ISBarrierID;
4228     if (getParser().parseExpression(ISBarrierID)) {
4229       Error(Loc, "illegal expression");
4230       return MatchOperand_ParseFail;
4231     }
4232 
4233     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4234     if (!CE) {
4235       Error(Loc, "constant expression expected");
4236       return MatchOperand_ParseFail;
4237     }
4238 
4239     int Val = CE->getValue();
4240     if (Val & ~0xf) {
4241       Error(Loc, "immediate value out of range");
4242       return MatchOperand_ParseFail;
4243     }
4244 
4245     Opt = ARM_ISB::RESERVED_0 + Val;
4246   } else
4247     return MatchOperand_ParseFail;
4248 
4249   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4250           (ARM_ISB::InstSyncBOpt)Opt, S));
4251   return MatchOperand_Success;
4252 }
4253 
4254 
4255 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4256 OperandMatchResultTy
4257 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4258   MCAsmParser &Parser = getParser();
4259   SMLoc S = Parser.getTok().getLoc();
4260   const AsmToken &Tok = Parser.getTok();
4261   if (!Tok.is(AsmToken::Identifier))
4262     return MatchOperand_NoMatch;
4263   StringRef IFlagsStr = Tok.getString();
4264 
4265   // An iflags string of "none" is interpreted to mean that none of the AIF
4266   // bits are set.  Not a terribly useful instruction, but a valid encoding.
4267   unsigned IFlags = 0;
4268   if (IFlagsStr != "none") {
4269         for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4270       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4271         .Case("a", ARM_PROC::A)
4272         .Case("i", ARM_PROC::I)
4273         .Case("f", ARM_PROC::F)
4274         .Default(~0U);
4275 
4276       // If some specific iflag is already set, it means that some letter is
4277       // present more than once, this is not acceptable.
4278       if (Flag == ~0U || (IFlags & Flag))
4279         return MatchOperand_NoMatch;
4280 
4281       IFlags |= Flag;
4282     }
4283   }
4284 
4285   Parser.Lex(); // Eat identifier token.
4286   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4287   return MatchOperand_Success;
4288 }
4289 
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
OperandMatchResultTy
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();

  // A bare integer in [0, 255] is accepted directly as the SYSm value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    if (Val > 255 || Val < 0) {
      return MatchOperand_NoMatch;
    }
    unsigned SYSmvalue = Val & 0xFF;
    Parser.Lex();
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  // M-class cores use named system registers looked up (case-insensitively)
  // in the generated sysreg table; availability is gated on the subtarget's
  // feature bits.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return MatchOperand_NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR accepts only the fixed flag groups below (note: flag part is
    // matched case-sensitively here, unlike the register name).
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g",      0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      // Bare "apsr" (no flags) means the full nzcvq group.
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each flag letter sets one mask bit: c=1, x=2, s=4, f=8.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
4386 
4387 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4388 /// use in the MRS/MSR instructions added to support virtualization.
4389 OperandMatchResultTy
4390 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4391   MCAsmParser &Parser = getParser();
4392   SMLoc S = Parser.getTok().getLoc();
4393   const AsmToken &Tok = Parser.getTok();
4394   if (!Tok.is(AsmToken::Identifier))
4395     return MatchOperand_NoMatch;
4396   StringRef RegName = Tok.getString();
4397 
4398   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4399   if (!TheReg)
4400     return MatchOperand_NoMatch;
4401   unsigned Encoding = TheReg->Encoding;
4402 
4403   Parser.Lex(); // Eat identifier token.
4404   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4405   return MatchOperand_Success;
4406 }
4407 
4408 OperandMatchResultTy
4409 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4410                           int High) {
4411   MCAsmParser &Parser = getParser();
4412   const AsmToken &Tok = Parser.getTok();
4413   if (Tok.isNot(AsmToken::Identifier)) {
4414     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4415     return MatchOperand_ParseFail;
4416   }
4417   StringRef ShiftName = Tok.getString();
4418   std::string LowerOp = Op.lower();
4419   std::string UpperOp = Op.upper();
4420   if (ShiftName != LowerOp && ShiftName != UpperOp) {
4421     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4422     return MatchOperand_ParseFail;
4423   }
4424   Parser.Lex(); // Eat shift type token.
4425 
4426   // There must be a '#' and a shift amount.
4427   if (Parser.getTok().isNot(AsmToken::Hash) &&
4428       Parser.getTok().isNot(AsmToken::Dollar)) {
4429     Error(Parser.getTok().getLoc(), "'#' expected");
4430     return MatchOperand_ParseFail;
4431   }
4432   Parser.Lex(); // Eat hash token.
4433 
4434   const MCExpr *ShiftAmount;
4435   SMLoc Loc = Parser.getTok().getLoc();
4436   SMLoc EndLoc;
4437   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4438     Error(Loc, "illegal expression");
4439     return MatchOperand_ParseFail;
4440   }
4441   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4442   if (!CE) {
4443     Error(Loc, "constant expression expected");
4444     return MatchOperand_ParseFail;
4445   }
4446   int Val = CE->getValue();
4447   if (Val < Low || Val > High) {
4448     Error(Loc, "immediate value out of range");
4449     return MatchOperand_ParseFail;
4450   }
4451 
4452   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4453 
4454   return MatchOperand_Success;
4455 }
4456 
4457 OperandMatchResultTy
4458 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4459   MCAsmParser &Parser = getParser();
4460   const AsmToken &Tok = Parser.getTok();
4461   SMLoc S = Tok.getLoc();
4462   if (Tok.isNot(AsmToken::Identifier)) {
4463     Error(S, "'be' or 'le' operand expected");
4464     return MatchOperand_ParseFail;
4465   }
4466   int Val = StringSwitch<int>(Tok.getString().lower())
4467     .Case("be", 1)
4468     .Case("le", 0)
4469     .Default(-1);
4470   Parser.Lex(); // Eat the token.
4471 
4472   if (Val == -1) {
4473     Error(S, "'be' or 'le' operand expected");
4474     return MatchOperand_ParseFail;
4475   }
4476   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4477                                                                   getContext()),
4478                                            S, Tok.getEndLoc()));
4479   return MatchOperand_Success;
4480 }
4481 
4482 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4483 /// instructions. Legal values are:
4484 ///     lsl #n  'n' in [0,31]
4485 ///     asr #n  'n' in [1,32]
4486 ///             n == 32 encoded as n == 0.
4487 OperandMatchResultTy
4488 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4489   MCAsmParser &Parser = getParser();
4490   const AsmToken &Tok = Parser.getTok();
4491   SMLoc S = Tok.getLoc();
4492   if (Tok.isNot(AsmToken::Identifier)) {
4493     Error(S, "shift operator 'asr' or 'lsl' expected");
4494     return MatchOperand_ParseFail;
4495   }
4496   StringRef ShiftName = Tok.getString();
4497   bool isASR;
4498   if (ShiftName == "lsl" || ShiftName == "LSL")
4499     isASR = false;
4500   else if (ShiftName == "asr" || ShiftName == "ASR")
4501     isASR = true;
4502   else {
4503     Error(S, "shift operator 'asr' or 'lsl' expected");
4504     return MatchOperand_ParseFail;
4505   }
4506   Parser.Lex(); // Eat the operator.
4507 
4508   // A '#' and a shift amount.
4509   if (Parser.getTok().isNot(AsmToken::Hash) &&
4510       Parser.getTok().isNot(AsmToken::Dollar)) {
4511     Error(Parser.getTok().getLoc(), "'#' expected");
4512     return MatchOperand_ParseFail;
4513   }
4514   Parser.Lex(); // Eat hash token.
4515   SMLoc ExLoc = Parser.getTok().getLoc();
4516 
4517   const MCExpr *ShiftAmount;
4518   SMLoc EndLoc;
4519   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4520     Error(ExLoc, "malformed shift expression");
4521     return MatchOperand_ParseFail;
4522   }
4523   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4524   if (!CE) {
4525     Error(ExLoc, "shift amount must be an immediate");
4526     return MatchOperand_ParseFail;
4527   }
4528 
4529   int64_t Val = CE->getValue();
4530   if (isASR) {
4531     // Shift amount must be in [1,32]
4532     if (Val < 1 || Val > 32) {
4533       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4534       return MatchOperand_ParseFail;
4535     }
4536     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4537     if (isThumb() && Val == 32) {
4538       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4539       return MatchOperand_ParseFail;
4540     }
4541     if (Val == 32) Val = 0;
4542   } else {
4543     // Shift amount must be in [1,32]
4544     if (Val < 0 || Val > 31) {
4545       Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
4546       return MatchOperand_ParseFail;
4547     }
4548   }
4549 
4550   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4551 
4552   return MatchOperand_Success;
4553 }
4554 
4555 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4556 /// of instructions. Legal values are:
4557 ///     ror #n  'n' in {0, 8, 16, 24}
4558 OperandMatchResultTy
4559 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4560   MCAsmParser &Parser = getParser();
4561   const AsmToken &Tok = Parser.getTok();
4562   SMLoc S = Tok.getLoc();
4563   if (Tok.isNot(AsmToken::Identifier))
4564     return MatchOperand_NoMatch;
4565   StringRef ShiftName = Tok.getString();
4566   if (ShiftName != "ror" && ShiftName != "ROR")
4567     return MatchOperand_NoMatch;
4568   Parser.Lex(); // Eat the operator.
4569 
4570   // A '#' and a rotate amount.
4571   if (Parser.getTok().isNot(AsmToken::Hash) &&
4572       Parser.getTok().isNot(AsmToken::Dollar)) {
4573     Error(Parser.getTok().getLoc(), "'#' expected");
4574     return MatchOperand_ParseFail;
4575   }
4576   Parser.Lex(); // Eat hash token.
4577   SMLoc ExLoc = Parser.getTok().getLoc();
4578 
4579   const MCExpr *ShiftAmount;
4580   SMLoc EndLoc;
4581   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4582     Error(ExLoc, "malformed rotate expression");
4583     return MatchOperand_ParseFail;
4584   }
4585   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4586   if (!CE) {
4587     Error(ExLoc, "rotate amount must be an immediate");
4588     return MatchOperand_ParseFail;
4589   }
4590 
4591   int64_t Val = CE->getValue();
4592   // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
4593   // normally, zero is represented in asm by omitting the rotate operand
4594   // entirely.
4595   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4596     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4597     return MatchOperand_ParseFail;
4598   }
4599 
4600   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4601 
4602   return MatchOperand_Success;
4603 }
4604 
4605 OperandMatchResultTy
4606 ARMAsmParser::parseModImm(OperandVector &Operands) {
4607   MCAsmParser &Parser = getParser();
4608   MCAsmLexer &Lexer = getLexer();
4609   int64_t Imm1, Imm2;
4610 
4611   SMLoc S = Parser.getTok().getLoc();
4612 
4613   // 1) A mod_imm operand can appear in the place of a register name:
4614   //   add r0, #mod_imm
4615   //   add r0, r0, #mod_imm
4616   // to correctly handle the latter, we bail out as soon as we see an
4617   // identifier.
4618   //
4619   // 2) Similarly, we do not want to parse into complex operands:
4620   //   mov r0, #mod_imm
4621   //   mov r0, :lower16:(_foo)
4622   if (Parser.getTok().is(AsmToken::Identifier) ||
4623       Parser.getTok().is(AsmToken::Colon))
4624     return MatchOperand_NoMatch;
4625 
4626   // Hash (dollar) is optional as per the ARMARM
4627   if (Parser.getTok().is(AsmToken::Hash) ||
4628       Parser.getTok().is(AsmToken::Dollar)) {
4629     // Avoid parsing into complex operands (#:)
4630     if (Lexer.peekTok().is(AsmToken::Colon))
4631       return MatchOperand_NoMatch;
4632 
4633     // Eat the hash (dollar)
4634     Parser.Lex();
4635   }
4636 
4637   SMLoc Sx1, Ex1;
4638   Sx1 = Parser.getTok().getLoc();
4639   const MCExpr *Imm1Exp;
4640   if (getParser().parseExpression(Imm1Exp, Ex1)) {
4641     Error(Sx1, "malformed expression");
4642     return MatchOperand_ParseFail;
4643   }
4644 
4645   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4646 
4647   if (CE) {
4648     // Immediate must fit within 32-bits
4649     Imm1 = CE->getValue();
4650     int Enc = ARM_AM::getSOImmVal(Imm1);
4651     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4652       // We have a match!
4653       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4654                                                   (Enc & 0xF00) >> 7,
4655                                                   Sx1, Ex1));
4656       return MatchOperand_Success;
4657     }
4658 
4659     // We have parsed an immediate which is not for us, fallback to a plain
4660     // immediate. This can happen for instruction aliases. For an example,
4661     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4662     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4663     // instruction with a mod_imm operand. The alias is defined such that the
4664     // parser method is shared, that's why we have to do this here.
4665     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4666       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4667       return MatchOperand_Success;
4668     }
4669   } else {
4670     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4671     // MCFixup). Fallback to a plain immediate.
4672     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4673     return MatchOperand_Success;
4674   }
4675 
4676   // From this point onward, we expect the input to be a (#bits, #rot) pair
4677   if (Parser.getTok().isNot(AsmToken::Comma)) {
4678     Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4679     return MatchOperand_ParseFail;
4680   }
4681 
4682   if (Imm1 & ~0xFF) {
4683     Error(Sx1, "immediate operand must a number in the range [0, 255]");
4684     return MatchOperand_ParseFail;
4685   }
4686 
4687   // Eat the comma
4688   Parser.Lex();
4689 
4690   // Repeat for #rot
4691   SMLoc Sx2, Ex2;
4692   Sx2 = Parser.getTok().getLoc();
4693 
4694   // Eat the optional hash (dollar)
4695   if (Parser.getTok().is(AsmToken::Hash) ||
4696       Parser.getTok().is(AsmToken::Dollar))
4697     Parser.Lex();
4698 
4699   const MCExpr *Imm2Exp;
4700   if (getParser().parseExpression(Imm2Exp, Ex2)) {
4701     Error(Sx2, "malformed expression");
4702     return MatchOperand_ParseFail;
4703   }
4704 
4705   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4706 
4707   if (CE) {
4708     Imm2 = CE->getValue();
4709     if (!(Imm2 & ~0x1E)) {
4710       // We have a match!
4711       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4712       return MatchOperand_Success;
4713     }
4714     Error(Sx2, "immediate operand must an even number in the range [0, 30]");
4715     return MatchOperand_ParseFail;
4716   } else {
4717     Error(Sx2, "constant expression expected");
4718     return MatchOperand_ParseFail;
4719   }
4720 }
4721 
/// parseBitfield - Parse the "#lsb, #width" operand pair used by the
/// BFC/BFI/SBFX/UBFX family of bitfield instructions.
OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}
4790 
4791 OperandMatchResultTy
4792 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4793   // Check for a post-index addressing register operand. Specifically:
4794   // postidx_reg := '+' register {, shift}
4795   //              | '-' register {, shift}
4796   //              | register {, shift}
4797 
4798   // This method must return MatchOperand_NoMatch without consuming any tokens
4799   // in the case where there is no match, as other alternatives take other
4800   // parse methods.
4801   MCAsmParser &Parser = getParser();
4802   AsmToken Tok = Parser.getTok();
4803   SMLoc S = Tok.getLoc();
4804   bool haveEaten = false;
4805   bool isAdd = true;
4806   if (Tok.is(AsmToken::Plus)) {
4807     Parser.Lex(); // Eat the '+' token.
4808     haveEaten = true;
4809   } else if (Tok.is(AsmToken::Minus)) {
4810     Parser.Lex(); // Eat the '-' token.
4811     isAdd = false;
4812     haveEaten = true;
4813   }
4814 
4815   SMLoc E = Parser.getTok().getEndLoc();
4816   int Reg = tryParseRegister();
4817   if (Reg == -1) {
4818     if (!haveEaten)
4819       return MatchOperand_NoMatch;
4820     Error(Parser.getTok().getLoc(), "register expected");
4821     return MatchOperand_ParseFail;
4822   }
4823 
4824   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4825   unsigned ShiftImm = 0;
4826   if (Parser.getTok().is(AsmToken::Comma)) {
4827     Parser.Lex(); // Eat the ','.
4828     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4829       return MatchOperand_ParseFail;
4830 
4831     // FIXME: Only approximates end...may include intervening whitespace.
4832     E = Parser.getTok().getLoc();
4833   }
4834 
4835   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4836                                                   ShiftImm, S, E));
4837 
4838   return MatchOperand_Success;
4839 }
4840 
OperandMatchResultTy
ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  // Note: Tok is a copy, not a reference, so it is not invalidated by Lex().
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }

  // Otherwise expect an optionally-signed register; '+' is the default.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  // Re-read Tok so that its location/end-location refer to the register
  // token itself (used for diagnostics and the operand's end location).
  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Tok.getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return MatchOperand_Success;
}
4911 
4912 /// Convert parsed operands to MCInst.  Needed here because this instruction
4913 /// only has two register operands, but multiplication is commutative so
4914 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4915 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4916                                     const OperandVector &Operands) {
4917   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4918   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4919   // If we have a three-operand form, make sure to set Rn to be the operand
4920   // that isn't the same as Rd.
4921   unsigned RegOp = 4;
4922   if (Operands.size() == 6 &&
4923       ((ARMOperand &)*Operands[4]).getReg() ==
4924           ((ARMOperand &)*Operands[3]).getReg())
4925     RegOp = 5;
4926   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
4927   Inst.addOperand(Inst.getOperand(0));
4928   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
4929 }
4930 
/// Convert parsed Thumb branch operands to an MCInst, selecting between the
/// conditional/unconditional and narrow/wide branch opcodes based on IT-block
/// context and the range of the target offset.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Operand indices of the condition code and branch target for this opcode.
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at it's location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      // Widen only when the offset doesn't fit the narrow (signed 11-bit,
      // halfword-scaled) encoding and the wide form is available.
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      // Conditional narrow branches only reach a signed 8-bit offset.
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
4988 
/// Parse an ARM memory expression, return false if successful else return true
/// or an error.  The first token must be a '[' when called.
///
/// Handles four forms after the base register:
///   [Rn]                      -- base only
///   [Rn :align]               -- NEON alignment specifier
///   [Rn, #imm]                -- immediate offset
///   [Rn, +/-Rm {, shift #n}]  -- register offset with optional shift
/// Each form may be followed by a '!' writeback marker, added as a token.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  // Every memory form starts with a base register.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Form 1: "[Rn]" -- base register only.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // Form 2: "[Rn :align]" -- if we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The alignment is given in bits; convert to the byte amount encoded in
    // the operand.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Form 3: "[Rn, #imm]".
  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    E = Parser.getTok().getLoc();

    // Track a leading '-' so "#-0" can be encoded distinctly from "#0".
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().parseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                  getContext());

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Form 4: "[Rn, +/-Rm {, shift #n}]".
  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
5179 
5180 /// parseMemRegOffsetShift - one of these two:
5181 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5182 ///   rrx
5183 /// return true if it parses a shift otherwise it returns false.
5184 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5185                                           unsigned &Amount) {
5186   MCAsmParser &Parser = getParser();
5187   SMLoc Loc = Parser.getTok().getLoc();
5188   const AsmToken &Tok = Parser.getTok();
5189   if (Tok.isNot(AsmToken::Identifier))
5190     return Error(Loc, "illegal shift operator");
5191   StringRef ShiftName = Tok.getString();
5192   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5193       ShiftName == "asl" || ShiftName == "ASL")
5194     St = ARM_AM::lsl;
5195   else if (ShiftName == "lsr" || ShiftName == "LSR")
5196     St = ARM_AM::lsr;
5197   else if (ShiftName == "asr" || ShiftName == "ASR")
5198     St = ARM_AM::asr;
5199   else if (ShiftName == "ror" || ShiftName == "ROR")
5200     St = ARM_AM::ror;
5201   else if (ShiftName == "rrx" || ShiftName == "RRX")
5202     St = ARM_AM::rrx;
5203   else
5204     return Error(Loc, "illegal shift operator");
5205   Parser.Lex(); // Eat shift type token.
5206 
5207   // rrx stands alone.
5208   Amount = 0;
5209   if (St != ARM_AM::rrx) {
5210     Loc = Parser.getTok().getLoc();
5211     // A '#' and a shift amount.
5212     const AsmToken &HashTok = Parser.getTok();
5213     if (HashTok.isNot(AsmToken::Hash) &&
5214         HashTok.isNot(AsmToken::Dollar))
5215       return Error(HashTok.getLoc(), "'#' expected");
5216     Parser.Lex(); // Eat hash token.
5217 
5218     const MCExpr *Expr;
5219     if (getParser().parseExpression(Expr))
5220       return true;
5221     // Range check the immediate.
5222     // lsl, ror: 0 <= imm <= 31
5223     // lsr, asr: 0 <= imm <= 32
5224     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5225     if (!CE)
5226       return Error(Loc, "shift amount must be an immediate");
5227     int64_t Imm = CE->getValue();
5228     if (Imm < 0 ||
5229         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5230         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5231       return Error(Loc, "immediate shift value out of range");
5232     // If <ShiftTy> #0, turn it into a no_shift.
5233     if (Imm == 0)
5234       St = ARM_AM::lsl;
5235     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5236     if (Imm == 32)
5237       Imm = 0;
5238     Amount = Imm;
5239   }
5240 
5241   return false;
5242 }
5243 
/// parseFPImm - A floating point immediate expression operand.
OperandMatchResultTy
ARMAsmParser::parseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
  bool isVmovf = TyOp.isToken() &&
                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
                  TyOp.getToken() == ".f16");
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  // Only the VMOV float forms and the fconst pseudos take an FP immediate;
  // otherwise let other parsers have a go without consuming anything.
  if (!(isVmovf || isFconst))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    // NOTE(review): the literal is parsed as IEEE single even for .f64/.f16;
    // the is*() operand predicates are relied on downstream to validate the
    // value for the actual width -- confirm against the operand definitions.
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the 8-bit VFP encoding into the float it denotes, then store
    // its bit pattern as the immediate.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}
5325 
/// Parse a arm instruction operand.  For now this parses the operand regardless
/// of the mnemonic.
///
/// Returns false on success (an operand was pushed onto \p Operands) and true
/// on error (a diagnostic has been emitted).
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Dispatch on the kind of the first token of the operand.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label.  This
    // is true even if the label is a register name.  So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      // Try register (with optional writeback '!') first, then shifted
      // register forms like "r0, lsl #2".
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    LLVM_FALLTHROUGH;
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash:
    // #42 -> immediate.
    S = Parser.getTok().getLoc();
    Parser.Lex();

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      // Plain immediate. Track a leading '-' so "#-0" can be encoded as
      // INT32_MIN, consistent with the other operand parsers in this file.
      bool isNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        if (isNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    LLVM_FALLTHROUGH;

  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    // Wrap the sub-expression in an ARMMCExpr carrying the relocation kind.
    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}
5462 
5463 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
5464 //  :lower16: and :upper16:.
5465 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5466   MCAsmParser &Parser = getParser();
5467   RefKind = ARMMCExpr::VK_ARM_None;
5468 
5469   // consume an optional '#' (GNU compatibility)
5470   if (getLexer().is(AsmToken::Hash))
5471     Parser.Lex();
5472 
5473   // :lower16: and :upper16: modifiers
5474   assert(getLexer().is(AsmToken::Colon) && "expected a :");
5475   Parser.Lex(); // Eat ':'
5476 
5477   if (getLexer().isNot(AsmToken::Identifier)) {
5478     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5479     return true;
5480   }
5481 
5482   enum {
5483     COFF = (1 << MCObjectFileInfo::IsCOFF),
5484     ELF = (1 << MCObjectFileInfo::IsELF),
5485     MACHO = (1 << MCObjectFileInfo::IsMachO),
5486     WASM = (1 << MCObjectFileInfo::IsWasm),
5487   };
5488   static const struct PrefixEntry {
5489     const char *Spelling;
5490     ARMMCExpr::VariantKind VariantKind;
5491     uint8_t SupportedFormats;
5492   } PrefixEntries[] = {
5493     { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5494     { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5495   };
5496 
5497   StringRef IDVal = Parser.getTok().getIdentifier();
5498 
5499   const auto &Prefix =
5500       std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5501                    [&IDVal](const PrefixEntry &PE) {
5502                       return PE.Spelling == IDVal;
5503                    });
5504   if (Prefix == std::end(PrefixEntries)) {
5505     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5506     return true;
5507   }
5508 
5509   uint8_t CurrentFormat;
5510   switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5511   case MCObjectFileInfo::IsMachO:
5512     CurrentFormat = MACHO;
5513     break;
5514   case MCObjectFileInfo::IsELF:
5515     CurrentFormat = ELF;
5516     break;
5517   case MCObjectFileInfo::IsCOFF:
5518     CurrentFormat = COFF;
5519     break;
5520   case MCObjectFileInfo::IsWasm:
5521     CurrentFormat = WASM;
5522     break;
5523   }
5524 
5525   if (~Prefix->SupportedFormats & CurrentFormat) {
5526     Error(Parser.getTok().getLoc(),
5527           "cannot represent relocation in the current file format");
5528     return true;
5529   }
5530 
5531   RefKind = Prefix->VariantKind;
5532   Parser.Lex();
5533 
5534   if (getLexer().isNot(AsmToken::Colon)) {
5535     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5536     return true;
5537   }
5538   Parser.Lex(); // Eat the last ':'
5539 
5540   return false;
5541 }
5542 
/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
      Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
      Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
      Mnemonic == "bxns"  || Mnemonic == "blxns" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // e.g. "bge" splits into "b" + the GE condition code.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // ARMCondCodeFromString returns ~0U when the suffix is not a condition.
    unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
        Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
        Mnemonic == "bxns" || Mnemonic == "blxns" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  // e.g. "cpsie" -> "cps" + IE.
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  // e.g. "itte" -> "it" + mask "te".
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
5633 
5634 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5635 /// inclusion of carry set or predication code operands.
5636 //
5637 // FIXME: It would be nice to autogen this.
5638 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5639                                          bool &CanAcceptCarrySet,
5640                                          bool &CanAcceptPredicationCode) {
5641   CanAcceptCarrySet =
5642       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5643       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5644       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5645       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5646       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5647       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5648       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5649       (!isThumb() &&
5650        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5651         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5652 
5653   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5654       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5655       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5656       Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5657       Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5658       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5659       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5660       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5661       Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5662       Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5663       (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
5664       Mnemonic == "vmovx" || Mnemonic == "vins" ||
5665       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5666       Mnemonic == "vcmla" || Mnemonic == "vcadd") {
5667     // These mnemonics are never predicable
5668     CanAcceptPredicationCode = false;
5669   } else if (!isThumb()) {
5670     // Some instructions are only predicable in Thumb mode
5671     CanAcceptPredicationCode =
5672         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5673         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5674         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
5675         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
5676         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
5677         Mnemonic != "stc2" && Mnemonic != "stc2l" &&
5678         !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
5679   } else if (isThumbOne()) {
5680     if (hasV6MOps())
5681       CanAcceptPredicationCode = Mnemonic != "movs";
5682     else
5683       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5684   } else
5685     CanAcceptPredicationCode = true;
5686 }
5687 
// \brief Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                                 bool CarrySetting,
                                                 OperandVector &Operands) {
  // Expect exactly: mnemonic, cc_out, cond-code, Rd, Rn, Rm/#imm.
  if (Operands.size() != 6)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
        auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Transform whenever PC is involved; SP cases are handled below.
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      // For 'add sp, sp, #imm', only transform if the immediate fits the
      // imm0_508s4 range of the two-operand encoding.
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a two-operand Thumb form to convert to.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(Op4, Op5);
    Operands.erase(Operands.begin() + 3);
  }
}
5773 
// Return true when the defaulted (non-flag-setting) cc_out operand at
// Operands[1] should be removed because the instruction variant we need to
// match does not have one. Callers check getReg() == 0 on Operands[1], i.e.
// the 's' suffix was not written.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                                          OperandVector &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
      return false;
    // Check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
        static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
       (Operands.size() == 6 &&
        static_cast<ARMOperand &>(*Operands[5]).isImm())))
    return true;

  return false;
}
5894 
5895 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5896                                               OperandVector &Operands) {
5897   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
5898   unsigned RegIdx = 3;
5899   if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
5900       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5901        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5902     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5903         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5904          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5905       RegIdx = 4;
5906 
5907     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5908         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5909              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5910          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5911              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5912       return true;
5913   }
5914   return false;
5915 }
5916 
5917 static bool isDataTypeToken(StringRef Tok) {
5918   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5919     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5920     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5921     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5922     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5923     Tok == ".f" || Tok == ".d";
5924 }
5925 
5926 // FIXME: This bit should probably be handled via an explicit match class
5927 // in the .td files that matches the suffix instead of having it be
5928 // a literal string token the way it is now.
5929 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5930   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5931 }
5932 
5933 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5934                                  unsigned VariantID);
5935 
5936 // The GNU assembler has aliases of ldrd and strd with the second register
5937 // omitted. We don't have a way to do that in tablegen, so fix it up here.
5938 //
5939 // We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
5941 // it. If we do find anything that prevents us from doing the transformation we
5942 // bail out, and let the assembly parser report an error on the instruction as
5943 // it is written.
void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
                                     OperandVector &Operands) {
  // Only ldrd/strd with at least mnemonic, cond-code, Rt and one more
  // operand are candidates for the alias.
  if (Mnemonic != "ldrd" && Mnemonic != "strd")
    return;
  if (Operands.size() < 4)
    return;

  ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
  ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);

  // The alias form is "ldrd/strd Rt, [mem]": a single GPR followed
  // directly by the memory operand.
  if (!Op2.isReg())
    return;
  if (!Op3.isMem())
    return;

  const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
  if (!GPR.contains(Op2.getReg()))
    return;

  unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
  if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must be from an aligned pair, this
    // restriction does not apply in Thumb mode.
    return;
  }
  if (Op2.getReg() == ARM::PC)
    return;
  // Rt2 is implicitly the next register after Rt. Bail out on pairings the
  // encoding can't express (no register, PC, or SP before v8).
  unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
  if (!PairedReg || PairedReg == ARM::PC ||
      (PairedReg == ARM::SP && !hasV8Ops()))
    return;

  // Insert the implied Rt2 so the operand list matches the tablegen'd
  // two-register form.
  Operands.insert(
      Operands.begin() + 3,
      ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
}
5980 
/// Parse an arm instruction mnemonic followed by its operands. Returns true
/// on error (diagnostic already emitted) or when the statement was consumed
/// as a directive; false when Operands has been populated for matching.
bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                    SMLoc NameLoc, OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffices and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  uint64_t AvailableFeatures = getAvailableFeatures();
  unsigned AssemblerDialect = getParser().getAssemblerDialect();
  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Loc points just past the "it" characters, at the mask suffix.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask from the last character backwards; the position of
    // the trailing '1' marker encodes the block length.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code follows the mnemonic and the optional 's' suffix.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  } else if (Mnemonic == "cps" && isMClass()) {
    return Error(NameLoc, "instruction 'cps' requires effect for M-class");
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // In ARM mode generate an error if the .n qualifier is used.
    if (ExtraToken == ".n" && !isThumb()) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
                   "arm mode");
    }

    // The .n qualifier is always discarded as that is what the tables
    // and matcher expect.  In ARM mode the .w qualifier has no effect,
    // so discard it to avoid errors that can be caused by the matcher.
    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      return true;
    }

    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        return true;
      }
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // Some instructions have the same mnemonic, but don't always
  // have a predicate. Distinguish them here and delete the
  // predicate if needed.
  if (PredicationCode == ARMCC::AL &&
      shouldOmitPredicateOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand &>(*Operands[2]).isImm())
    Operands.erase(Operands.begin() + 1);

  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GRPs cannot be automatically
  // expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  if (!isThumb() && Operands.size() > 4 &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
    // For loads the pair is the first two operands; for stores it follows
    // the status-result register.
    unsigned Idx = isLoad ? 2 : 3;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
    // Adjust only if Op1 and Op2 are GPRs.
    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
        MRC.contains(Op2.getReg())) {
      unsigned Reg1 = Op1.getReg();
      unsigned Reg2 = Op2.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg1);
      unsigned Rt2 = MRI->getEncodingValue(Reg2);

      // Rt2 must be Rt + 1 and Rt must be even.
      if (Rt + 1 != Rt2 || (Rt & 1)) {
        return Error(Op2.getStartLoc(),
                     isLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");
      }
      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
      Operands[Idx] =
          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
      Operands.erase(Operands.begin() + Idx + 1);
    }
  }

  // GNU Assembler extension (compatibility).
  fixupGNULDRDAlias(Mnemonic, Operands);

  // FIXME: As said above, this is all a pretty gross hack.  This instruction
  // does not fit with other "subs" and tblgen.
  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
  // so the Mnemonic is the original name "subs" and delete the predicate
  // operand so it will match the table entry.
  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
    Operands.erase(Operands.begin() + 1);
  }
  return false;
}
6226 
6227 // Validate context-sensitive operand constraints.
6228 
// Return 'true' if the register list contains non-low GPR registers (other
// than the permitted HiReg), 'false' otherwise. If Reg is found in the
// register list, set 'containsReg' to true.
6232 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6233                                  unsigned Reg, unsigned HiReg,
6234                                  bool &containsReg) {
6235   containsReg = false;
6236   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6237     unsigned OpReg = Inst.getOperand(i).getReg();
6238     if (OpReg == Reg)
6239       containsReg = true;
6240     // Anything other than a low register isn't legal here.
6241     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6242       return true;
6243   }
6244   return false;
6245 }
6246 
6247 // Check if the specified regisgter is in the register list of the inst,
6248 // starting at the indicated operand number.
6249 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6250   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6251     unsigned OpReg = Inst.getOperand(i).getReg();
6252     if (OpReg == Reg)
6253       return true;
6254   }
6255   return false;
6256 }
6257 
6258 // Return true if instruction has the interesting property of being
6259 // allowed in IT blocks, but not being predicable.
6260 static bool instIsBreakpoint(const MCInst &Inst) {
6261     return Inst.getOpcode() == ARM::tBKPT ||
6262            Inst.getOpcode() == ARM::BKPT ||
6263            Inst.getOpcode() == ARM::tHLT ||
6264            Inst.getOpcode() == ARM::HLT;
6265 }
6266 
6267 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6268                                        const OperandVector &Operands,
6269                                        unsigned ListNo, bool IsARPop) {
6270   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6271   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6272 
6273   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6274   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6275   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6276 
6277   if (!IsARPop && ListContainsSP)
6278     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6279                  "SP may not be in the register list");
6280   else if (ListContainsPC && ListContainsLR)
6281     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6282                  "PC and LR may not be in the register list simultaneously");
6283   return false;
6284 }
6285 
6286 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6287                                        const OperandVector &Operands,
6288                                        unsigned ListNo) {
6289   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6290   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6291 
6292   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6293   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6294 
6295   if (ListContainsSP && ListContainsPC)
6296     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6297                  "SP and PC may not be in the register list");
6298   else if (ListContainsSP)
6299     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6300                  "SP may not be in the register list");
6301   else if (ListContainsPC)
6302     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6303                  "PC may not be in the register list");
6304   return false;
6305 }
6306 
// FIXME: We would really like to be able to tablegen'erate this.
// Target-specific post-match legality checks that the auto-generated
// matcher cannot express: IT-block rules first, then a per-opcode switch
// over operand constraints. Returns true (after emitting a diagnostic or
// warning via Error/Warning) when the instruction is rejected; returns
// false when it is acceptable.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
                                       const OperandVector &Operands) {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  // Default diagnostic location: the mnemonic token.
  SMLoc Loc = Operands[0]->getStartLoc();

  // Check the IT block state first.
  // NOTE: BKPT and HLT instructions have the interesting property of being
  // allowed in IT blocks, but not being predicable. They just always execute.
  if (inITBlock() && !instIsBreakpoint(Inst)) {
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    ARMCC::CondCodes Cond = ARMCC::CondCodes(
        Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
    // The encoded condition must match the condition implied by the
    // current position within the IT block.
    if (Cond != currentITCond()) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned I = 1; I < Operands.size(); ++I)
        if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
          CondLoc = Operands[I]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                                StringRef(ARMCondCodeToString(Cond)) +
                                "', but expected '" +
                                ARMCondCodeToString(currentITCond()) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
             Inst.getOpcode() != ARM::t2Bcc) {
    return Error(Loc, "predicated instructions must be in IT block");
  } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
                 ARMCC::AL) {
    // In ARM mode a stray predicate is only UNPREDICTABLE-ish style; warn
    // rather than reject.
    return Warning(Loc, "predicated instructions should be in IT block");
  }

  // PC-setting instructions in an IT block, but not the last instruction of
  // the block, are UNPREDICTABLE.
  if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
    return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
  }

  // Per-opcode operand constraints follow.
  const unsigned Opcode = Inst.getOpcode();
  switch (Opcode) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST: {
    const unsigned RtReg = Inst.getOperand(0).getReg();

    // Rt can't be R14.
    if (RtReg == ARM::LR)
      return Error(Operands[3]->getStartLoc(),
                   "Rt can't be R14");

    const unsigned Rt = MRI->getEncodingValue(RtReg);
    // Rt must be even-numbered.
    if ((Rt & 1) == 1)
      return Error(Operands[3]->getStartLoc(),
                   "Rt must be even-numbered");

    // Rt2 must be Rt + 1.
    const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");

    if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
      const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
      // For addressing modes with writeback, the base register needs to be
      // different from the destination registers.
      if (Rn == Rt || Rn == Rt2)
        return Error(Operands[3]->getStartLoc(),
                     "base register needs to be different from destination "
                     "registers");
    }

    return false;
  }
  case ARM::t2LDRDi8:
  case ARM::t2LDRD_PRE:
  case ARM::t2LDRD_POST: {
    // Rt2 must be different from Rt.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 == Rt)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands can't be identical");
    return false;
  }
  case ARM::t2BXJ: {
    const unsigned RmReg = Inst.getOperand(0).getReg();
    // Rm = SP is no longer unpredictable in v8-A
    if (RmReg == ARM::SP && !hasV8Ops())
      return Error(Operands[2]->getStartLoc(),
                   "r13 (SP) is an unpredictable operand to BXJ");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST: {
    // Rt2 must be Rt + 1. Note the writeback destination shifts the source
    // registers to operands 1 and 2.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STR_PRE_IMM:
  case ARM::STR_PRE_REG:
  case ARM::STR_POST_IMM:
  case ARM::STR_POST_REG:
  case ARM::STRH_PRE:
  case ARM::STRH_POST:
  case ARM::STRB_PRE_IMM:
  case ARM::STRB_PRE_REG:
  case ARM::STRB_POST_IMM:
  case ARM::STRB_POST_REG: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "source register and base register can't be identical");
    return false;
  }
  case ARM::LDR_PRE_IMM:
  case ARM::LDR_PRE_REG:
  case ARM::LDR_POST_IMM:
  case ARM::LDR_POST_REG:
  case ARM::LDRH_PRE:
  case ARM::LDRH_POST:
  case ARM::LDRSH_PRE:
  case ARM::LDRSH_POST:
  case ARM::LDRB_PRE_IMM:
  case ARM::LDRB_PRE_REG:
  case ARM::LDRB_POST_IMM:
  case ARM::LDRB_POST_REG:
  case ARM::LDRSB_PRE:
  case ARM::LDRSB_POST: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "destination register and base register can't be identical");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // Width must be in range [1, 32-lsb].
    unsigned LSB = Inst.getOperand(2).getImm();
    unsigned Widthm1 = Inst.getOperand(3).getImm();
    if (Widthm1 >= 32 - LSB)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  // Notionally handles ARM::tLDMIA_UPD too.
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM instruction.
    // We'll make the transformation in processInstruction() if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool HasWritebackToken =
        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (ListContainsBase && HasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  }
  case ARM::LDMIA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::LDMDA_UPD:
    // ARM variants loading and updating the same register are only officially
    // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
    if (!hasV7Ops())
      break;
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    if (validatetSTMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    // The writeback base may not also appear in the transfer list.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");

    if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      if (validatetLDMRegList(Inst, Operands, 3))
        return true;
    } else {
      if (validatetSTMRegList(Inst, Operands, 3))
        return true;
    }
    break;

  case ARM::sysLDMIA_UPD:
  case ARM::sysLDMDA_UPD:
  case ARM::sysLDMDB_UPD:
  case ARM::sysLDMIB_UPD:
    if (!listContainsReg(Inst, 3, ARM::PC))
      return Error(Operands[4]->getStartLoc(),
                   "writeback register only allowed on system LDM "
                   "if PC in register-list");
    break;
  case ARM::sysSTMIA_UPD:
  case ARM::sysSTMDA_UPD:
  case ARM::sysSTMDB_UPD:
  case ARM::sysSTMIB_UPD:
    return Error(Operands[2]->getStartLoc(),
                 "system STM cannot have writeback register");
  case ARM::tMUL:
    // The second source operand must be the same register as the destination
    // operand.
    //
    // In this case, we must directly check the parsed operands because the
    // cvtThumbMultiply() function is written in such a way that it guarantees
    // this first statement is always true for the new Inst.  Essentially, the
    // destination is unconditionally copied into the second source operand
    // without checking to see if it matches what we actually parsed.
    if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
                                 ((ARMOperand &)*Operands[5]).getReg()) &&
        (((ARMOperand &)*Operands[3]).getReg() !=
         ((ARMOperand &)*Operands[4]).getReg())) {
      return Error(Operands[3]->getStartLoc(),
                   "destination register must match source register");
    }
    break;

  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool ListContainsBase;
    // PC is the one high register POP may name in Thumb1.
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
      return true;
    break;
  }
  case ARM::tPUSH: {
    bool ListContainsBase;
    // LR is the one high register PUSH may name in Thumb1.
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    if (validatetSTMRegList(Inst, Operands, 2))
      return true;
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool ListContainsBase, InvalidLowList;
    InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
                                          0, ListContainsBase);
    if (InvalidLowList && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");

    // This would be converted to a 32-bit stm, but that's not valid if the
    // writeback register is in the list.
    if (InvalidLowList && ListContainsBase)
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetSTMRegList(Inst, Operands, 4))
      return true;
    break;
  }
  case ARM::tADDrSP:
    // If the non-SP source operand and the destination operand are not the
    // same, we need thumb2 (for the wide encoding), or we have an error.
    if (!isThumbTwo() &&
        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
      return Error(Operands[4]->getStartLoc(),
                   "source register must be the same as destination");
    }
    break;

  // Final range checking for Thumb unconditional branch instructions.
  case ARM::tB:
    if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2B: {
    // With a predicate present the target shifts from operand 2 to 3.
    int op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
      return Error(Operands[op]->getStartLoc(), "branch target out of range");
    break;
  }
  // Final range checking for Thumb conditional branch instructions.
  case ARM::tBcc:
    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2Bcc: {
    int Op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::tCBZ:
  case ARM::tCBNZ: {
    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::MOVi16:
  case ARM::MOVTi16:
  case ARM::t2MOVi16:
  case ARM::t2MOVTi16:
    {
    // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
    // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16: as part of the expression.  We don't
    // want the behavior of silently truncating, which can be unexpected and
    // lead to bugs that are difficult to find since this is an easy mistake
    // to make.
    int i = (Operands[3]->isImm()) ? 3 : 4;
    ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (CE) break;
    // NOTE(review): dyn_cast<MCExpr> of an MCExpr* always succeeds, so this
    // null-check looks redundant; kept as-is to preserve behavior.
    const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
    if (!E) break;
    const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
    if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                       ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
      return Error(
          Op.getStartLoc(),
          "immediate expression for mov requires :lower16: or :upper16");
    break;
  }
  case ARM::HINT:
  case ARM::t2HINT: {
    unsigned Imm8 = Inst.getOperand(0).getImm();
    unsigned Pred = Inst.getOperand(1).getImm();
    // ESB is not predicable (pred must be AL). Without the RAS extension, this
    // behaves as any other unallocated hint.
    if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
      return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
                                               "predicable, but condition "
                                               "code specified");
    if (Imm8 == 0x14 && Pred != ARMCC::AL)
      return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
                                               "predicable, but condition "
                                               "code specified");
    break;
  }
  case ARM::VMOVRRS: {
    // Source registers must be sequential.
    const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
    const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
    if (Sm1 != Sm + 1)
      return Error(Operands[5]->getStartLoc(),
                   "source operands must be sequential");
    break;
  }
  case ARM::VMOVSRR: {
    // Destination registers must be sequential.
    const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Sm1 != Sm + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    break;
  }
  }

  return false;
}
6723 
6724 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
6725   switch(Opc) {
6726   default: llvm_unreachable("unexpected opcode!");
6727   // VST1LN
6728   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
6729   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6730   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6731   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
6732   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6733   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6734   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
6735   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6736   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6737 
6738   // VST2LN
6739   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
6740   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6741   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6742   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6743   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6744 
6745   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
6746   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6747   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6748   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6749   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6750 
6751   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
6752   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6753   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6754   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6755   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6756 
6757   // VST3LN
6758   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
6759   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6760   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6761   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6762   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6763   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
6764   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6765   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6766   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6767   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6768   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
6769   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6770   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6771   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6772   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6773 
6774   // VST3
6775   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
6776   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6777   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6778   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
6779   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6780   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6781   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
6782   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6783   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6784   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
6785   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6786   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6787   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
6788   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6789   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6790   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
6791   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6792   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6793 
6794   // VST4LN
6795   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
6796   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6797   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6798   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
6799   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6800   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
6801   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
6802   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
6803   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
6804   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
6805   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
6806   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
6807   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
6808   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
6809   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
6810 
6811   // VST4
6812   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
6813   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6814   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6815   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
6816   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6817   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6818   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
6819   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
6820   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
6821   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
6822   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
6823   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
6824   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
6825   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
6826   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
6827   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
6828   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
6829   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
6830   }
6831 }
6832 
6833 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6834   switch(Opc) {
6835   default: llvm_unreachable("unexpected opcode!");
6836   // VLD1LN
6837   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6838   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6839   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6840   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6841   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6842   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6843   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
6844   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6845   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6846 
6847   // VLD2LN
6848   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6849   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6850   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6851   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6852   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6853   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6854   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6855   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6856   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6857   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6858   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
6859   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6860   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6861   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6862   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6863 
6864   // VLD3DUP
6865   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6866   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6867   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6868   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6869   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6870   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6871   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6872   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6873   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6874   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6875   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6876   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6877   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
6878   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6879   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6880   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6881   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6882   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6883 
6884   // VLD3LN
6885   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6886   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6887   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6888   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6889   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6890   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6891   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6892   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6893   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6894   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6895   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
6896   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6897   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6898   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6899   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6900 
6901   // VLD3
6902   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6903   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6904   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6905   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6906   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6907   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6908   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6909   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6910   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6911   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6912   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6913   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6914   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
6915   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6916   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6917   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
6918   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6919   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6920 
6921   // VLD4LN
6922   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6923   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6924   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6925   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6926   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6927   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6928   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6929   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6930   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6931   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6932   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
6933   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6934   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6935   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6936   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6937 
6938   // VLD4DUP
6939   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6940   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6941   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6942   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6943   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6944   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6945   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6946   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6947   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6948   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6949   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6950   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6951   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
6952   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6953   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6954   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6955   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6956   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6957 
6958   // VLD4
6959   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6960   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6961   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6962   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6963   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6964   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6965   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6966   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6967   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6968   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6969   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6970   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6971   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
6972   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6973   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6974   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
6975   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6976   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6977   }
6978 }
6979 
6980 bool ARMAsmParser::processInstruction(MCInst &Inst,
6981                                       const OperandVector &Operands,
6982                                       MCStreamer &Out) {
6983   // Check if we have the wide qualifier, because if it's present we
6984   // must avoid selecting a 16-bit thumb instruction.
6985   bool HasWideQualifier = false;
6986   for (auto &Op : Operands) {
6987     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
6988     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
6989       HasWideQualifier = true;
6990       break;
6991     }
6992   }
6993 
6994   switch (Inst.getOpcode()) {
6995   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
6996   case ARM::LDRT_POST:
6997   case ARM::LDRBT_POST: {
6998     const unsigned Opcode =
6999       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
7000                                            : ARM::LDRBT_POST_IMM;
7001     MCInst TmpInst;
7002     TmpInst.setOpcode(Opcode);
7003     TmpInst.addOperand(Inst.getOperand(0));
7004     TmpInst.addOperand(Inst.getOperand(1));
7005     TmpInst.addOperand(Inst.getOperand(1));
7006     TmpInst.addOperand(MCOperand::createReg(0));
7007     TmpInst.addOperand(MCOperand::createImm(0));
7008     TmpInst.addOperand(Inst.getOperand(2));
7009     TmpInst.addOperand(Inst.getOperand(3));
7010     Inst = TmpInst;
7011     return true;
7012   }
7013   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
7014   case ARM::STRT_POST:
7015   case ARM::STRBT_POST: {
7016     const unsigned Opcode =
7017       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
7018                                            : ARM::STRBT_POST_IMM;
7019     MCInst TmpInst;
7020     TmpInst.setOpcode(Opcode);
7021     TmpInst.addOperand(Inst.getOperand(1));
7022     TmpInst.addOperand(Inst.getOperand(0));
7023     TmpInst.addOperand(Inst.getOperand(1));
7024     TmpInst.addOperand(MCOperand::createReg(0));
7025     TmpInst.addOperand(MCOperand::createImm(0));
7026     TmpInst.addOperand(Inst.getOperand(2));
7027     TmpInst.addOperand(Inst.getOperand(3));
7028     Inst = TmpInst;
7029     return true;
7030   }
7031   // Alias for alternate form of 'ADR Rd, #imm' instruction.
7032   case ARM::ADDri: {
7033     if (Inst.getOperand(1).getReg() != ARM::PC ||
7034         Inst.getOperand(5).getReg() != 0 ||
7035         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
7036       return false;
7037     MCInst TmpInst;
7038     TmpInst.setOpcode(ARM::ADR);
7039     TmpInst.addOperand(Inst.getOperand(0));
7040     if (Inst.getOperand(2).isImm()) {
7041       // Immediate (mod_imm) will be in its encoded form, we must unencode it
7042       // before passing it to the ADR instruction.
7043       unsigned Enc = Inst.getOperand(2).getImm();
7044       TmpInst.addOperand(MCOperand::createImm(
7045         ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
7046     } else {
7047       // Turn PC-relative expression into absolute expression.
7048       // Reading PC provides the start of the current instruction + 8 and
7049       // the transform to adr is biased by that.
7050       MCSymbol *Dot = getContext().createTempSymbol();
7051       Out.EmitLabel(Dot);
7052       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
7053       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
7054                                                      MCSymbolRefExpr::VK_None,
7055                                                      getContext());
7056       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
7057       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
7058                                                      getContext());
7059       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
7060                                                         getContext());
7061       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
7062     }
7063     TmpInst.addOperand(Inst.getOperand(3));
7064     TmpInst.addOperand(Inst.getOperand(4));
7065     Inst = TmpInst;
7066     return true;
7067   }
7068   // Aliases for alternate PC+imm syntax of LDR instructions.
7069   case ARM::t2LDRpcrel:
7070     // Select the narrow version if the immediate will fit.
7071     if (Inst.getOperand(1).getImm() > 0 &&
7072         Inst.getOperand(1).getImm() <= 0xff &&
7073         !HasWideQualifier)
7074       Inst.setOpcode(ARM::tLDRpci);
7075     else
7076       Inst.setOpcode(ARM::t2LDRpci);
7077     return true;
7078   case ARM::t2LDRBpcrel:
7079     Inst.setOpcode(ARM::t2LDRBpci);
7080     return true;
7081   case ARM::t2LDRHpcrel:
7082     Inst.setOpcode(ARM::t2LDRHpci);
7083     return true;
7084   case ARM::t2LDRSBpcrel:
7085     Inst.setOpcode(ARM::t2LDRSBpci);
7086     return true;
7087   case ARM::t2LDRSHpcrel:
7088     Inst.setOpcode(ARM::t2LDRSHpci);
7089     return true;
7090   case ARM::LDRConstPool:
7091   case ARM::tLDRConstPool:
7092   case ARM::t2LDRConstPool: {
7093     // Pseudo instruction ldr rt, =immediate is converted to a
7094     // MOV rt, immediate if immediate is known and representable
7095     // otherwise we create a constant pool entry that we load from.
7096     MCInst TmpInst;
7097     if (Inst.getOpcode() == ARM::LDRConstPool)
7098       TmpInst.setOpcode(ARM::LDRi12);
7099     else if (Inst.getOpcode() == ARM::tLDRConstPool)
7100       TmpInst.setOpcode(ARM::tLDRpci);
7101     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
7102       TmpInst.setOpcode(ARM::t2LDRpci);
7103     const ARMOperand &PoolOperand =
7104       (HasWideQualifier ?
7105        static_cast<ARMOperand &>(*Operands[4]) :
7106        static_cast<ARMOperand &>(*Operands[3]));
7107     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
7108     // If SubExprVal is a constant we may be able to use a MOV
7109     if (isa<MCConstantExpr>(SubExprVal) &&
7110         Inst.getOperand(0).getReg() != ARM::PC &&
7111         Inst.getOperand(0).getReg() != ARM::SP) {
7112       int64_t Value =
7113         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7114       bool UseMov  = true;
7115       bool MovHasS = true;
7116       if (Inst.getOpcode() == ARM::LDRConstPool) {
7117         // ARM Constant
7118         if (ARM_AM::getSOImmVal(Value) != -1) {
7119           Value = ARM_AM::getSOImmVal(Value);
7120           TmpInst.setOpcode(ARM::MOVi);
7121         }
7122         else if (ARM_AM::getSOImmVal(~Value) != -1) {
7123           Value = ARM_AM::getSOImmVal(~Value);
7124           TmpInst.setOpcode(ARM::MVNi);
7125         }
7126         else if (hasV6T2Ops() &&
7127                  Value >=0 && Value < 65536) {
7128           TmpInst.setOpcode(ARM::MOVi16);
7129           MovHasS = false;
7130         }
7131         else
7132           UseMov = false;
7133       }
7134       else {
7135         // Thumb/Thumb2 Constant
7136         if (hasThumb2() &&
7137             ARM_AM::getT2SOImmVal(Value) != -1)
7138           TmpInst.setOpcode(ARM::t2MOVi);
7139         else if (hasThumb2() &&
7140                  ARM_AM::getT2SOImmVal(~Value) != -1) {
7141           TmpInst.setOpcode(ARM::t2MVNi);
7142           Value = ~Value;
7143         }
7144         else if (hasV8MBaseline() &&
7145                  Value >=0 && Value < 65536) {
7146           TmpInst.setOpcode(ARM::t2MOVi16);
7147           MovHasS = false;
7148         }
7149         else
7150           UseMov = false;
7151       }
7152       if (UseMov) {
7153         TmpInst.addOperand(Inst.getOperand(0));           // Rt
7154         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
7155         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7156         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7157         if (MovHasS)
7158           TmpInst.addOperand(MCOperand::createReg(0));    // S
7159         Inst = TmpInst;
7160         return true;
7161       }
7162     }
7163     // No opportunity to use MOV/MVN create constant pool
7164     const MCExpr *CPLoc =
7165       getTargetStreamer().addConstantPoolEntry(SubExprVal,
7166                                                PoolOperand.getStartLoc());
7167     TmpInst.addOperand(Inst.getOperand(0));           // Rt
7168     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7169     if (TmpInst.getOpcode() == ARM::LDRi12)
7170       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
7171     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7172     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7173     Inst = TmpInst;
7174     return true;
7175   }
7176   // Handle NEON VST complex aliases.
7177   case ARM::VST1LNdWB_register_Asm_8:
7178   case ARM::VST1LNdWB_register_Asm_16:
7179   case ARM::VST1LNdWB_register_Asm_32: {
7180     MCInst TmpInst;
7181     // Shuffle the operands around so the lane index operand is in the
7182     // right place.
7183     unsigned Spacing;
7184     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7185     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7186     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7187     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7188     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7189     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7190     TmpInst.addOperand(Inst.getOperand(1)); // lane
7191     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7192     TmpInst.addOperand(Inst.getOperand(6));
7193     Inst = TmpInst;
7194     return true;
7195   }
7196 
7197   case ARM::VST2LNdWB_register_Asm_8:
7198   case ARM::VST2LNdWB_register_Asm_16:
7199   case ARM::VST2LNdWB_register_Asm_32:
7200   case ARM::VST2LNqWB_register_Asm_16:
7201   case ARM::VST2LNqWB_register_Asm_32: {
7202     MCInst TmpInst;
7203     // Shuffle the operands around so the lane index operand is in the
7204     // right place.
7205     unsigned Spacing;
7206     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7207     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7208     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7209     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7210     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7211     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7212     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7213                                             Spacing));
7214     TmpInst.addOperand(Inst.getOperand(1)); // lane
7215     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7216     TmpInst.addOperand(Inst.getOperand(6));
7217     Inst = TmpInst;
7218     return true;
7219   }
7220 
7221   case ARM::VST3LNdWB_register_Asm_8:
7222   case ARM::VST3LNdWB_register_Asm_16:
7223   case ARM::VST3LNdWB_register_Asm_32:
7224   case ARM::VST3LNqWB_register_Asm_16:
7225   case ARM::VST3LNqWB_register_Asm_32: {
7226     MCInst TmpInst;
7227     // Shuffle the operands around so the lane index operand is in the
7228     // right place.
7229     unsigned Spacing;
7230     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7231     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7232     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7233     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7234     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7235     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7236     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7237                                             Spacing));
7238     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7239                                             Spacing * 2));
7240     TmpInst.addOperand(Inst.getOperand(1)); // lane
7241     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7242     TmpInst.addOperand(Inst.getOperand(6));
7243     Inst = TmpInst;
7244     return true;
7245   }
7246 
7247   case ARM::VST4LNdWB_register_Asm_8:
7248   case ARM::VST4LNdWB_register_Asm_16:
7249   case ARM::VST4LNdWB_register_Asm_32:
7250   case ARM::VST4LNqWB_register_Asm_16:
7251   case ARM::VST4LNqWB_register_Asm_32: {
7252     MCInst TmpInst;
7253     // Shuffle the operands around so the lane index operand is in the
7254     // right place.
7255     unsigned Spacing;
7256     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7257     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7258     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7259     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7260     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7261     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7262     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7263                                             Spacing));
7264     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7265                                             Spacing * 2));
7266     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7267                                             Spacing * 3));
7268     TmpInst.addOperand(Inst.getOperand(1)); // lane
7269     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7270     TmpInst.addOperand(Inst.getOperand(6));
7271     Inst = TmpInst;
7272     return true;
7273   }
7274 
7275   case ARM::VST1LNdWB_fixed_Asm_8:
7276   case ARM::VST1LNdWB_fixed_Asm_16:
7277   case ARM::VST1LNdWB_fixed_Asm_32: {
7278     MCInst TmpInst;
7279     // Shuffle the operands around so the lane index operand is in the
7280     // right place.
7281     unsigned Spacing;
7282     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7283     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7284     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7285     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7286     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7287     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7288     TmpInst.addOperand(Inst.getOperand(1)); // lane
7289     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7290     TmpInst.addOperand(Inst.getOperand(5));
7291     Inst = TmpInst;
7292     return true;
7293   }
7294 
7295   case ARM::VST2LNdWB_fixed_Asm_8:
7296   case ARM::VST2LNdWB_fixed_Asm_16:
7297   case ARM::VST2LNdWB_fixed_Asm_32:
7298   case ARM::VST2LNqWB_fixed_Asm_16:
7299   case ARM::VST2LNqWB_fixed_Asm_32: {
7300     MCInst TmpInst;
7301     // Shuffle the operands around so the lane index operand is in the
7302     // right place.
7303     unsigned Spacing;
7304     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7305     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7306     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7307     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7308     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7309     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7310     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7311                                             Spacing));
7312     TmpInst.addOperand(Inst.getOperand(1)); // lane
7313     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7314     TmpInst.addOperand(Inst.getOperand(5));
7315     Inst = TmpInst;
7316     return true;
7317   }
7318 
7319   case ARM::VST3LNdWB_fixed_Asm_8:
7320   case ARM::VST3LNdWB_fixed_Asm_16:
7321   case ARM::VST3LNdWB_fixed_Asm_32:
7322   case ARM::VST3LNqWB_fixed_Asm_16:
7323   case ARM::VST3LNqWB_fixed_Asm_32: {
7324     MCInst TmpInst;
7325     // Shuffle the operands around so the lane index operand is in the
7326     // right place.
7327     unsigned Spacing;
7328     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7329     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7330     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7331     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7332     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7333     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7334     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7335                                             Spacing));
7336     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7337                                             Spacing * 2));
7338     TmpInst.addOperand(Inst.getOperand(1)); // lane
7339     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7340     TmpInst.addOperand(Inst.getOperand(5));
7341     Inst = TmpInst;
7342     return true;
7343   }
7344 
7345   case ARM::VST4LNdWB_fixed_Asm_8:
7346   case ARM::VST4LNdWB_fixed_Asm_16:
7347   case ARM::VST4LNdWB_fixed_Asm_32:
7348   case ARM::VST4LNqWB_fixed_Asm_16:
7349   case ARM::VST4LNqWB_fixed_Asm_32: {
7350     MCInst TmpInst;
7351     // Shuffle the operands around so the lane index operand is in the
7352     // right place.
7353     unsigned Spacing;
7354     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7355     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7356     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7357     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7358     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7359     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7360     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7361                                             Spacing));
7362     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7363                                             Spacing * 2));
7364     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7365                                             Spacing * 3));
7366     TmpInst.addOperand(Inst.getOperand(1)); // lane
7367     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7368     TmpInst.addOperand(Inst.getOperand(5));
7369     Inst = TmpInst;
7370     return true;
7371   }
7372 
7373   case ARM::VST1LNdAsm_8:
7374   case ARM::VST1LNdAsm_16:
7375   case ARM::VST1LNdAsm_32: {
7376     MCInst TmpInst;
7377     // Shuffle the operands around so the lane index operand is in the
7378     // right place.
7379     unsigned Spacing;
7380     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7381     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7382     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7383     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7384     TmpInst.addOperand(Inst.getOperand(1)); // lane
7385     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7386     TmpInst.addOperand(Inst.getOperand(5));
7387     Inst = TmpInst;
7388     return true;
7389   }
7390 
7391   case ARM::VST2LNdAsm_8:
7392   case ARM::VST2LNdAsm_16:
7393   case ARM::VST2LNdAsm_32:
7394   case ARM::VST2LNqAsm_16:
7395   case ARM::VST2LNqAsm_32: {
7396     MCInst TmpInst;
7397     // Shuffle the operands around so the lane index operand is in the
7398     // right place.
7399     unsigned Spacing;
7400     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7401     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7402     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7403     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7404     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7405                                             Spacing));
7406     TmpInst.addOperand(Inst.getOperand(1)); // lane
7407     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7408     TmpInst.addOperand(Inst.getOperand(5));
7409     Inst = TmpInst;
7410     return true;
7411   }
7412 
7413   case ARM::VST3LNdAsm_8:
7414   case ARM::VST3LNdAsm_16:
7415   case ARM::VST3LNdAsm_32:
7416   case ARM::VST3LNqAsm_16:
7417   case ARM::VST3LNqAsm_32: {
7418     MCInst TmpInst;
7419     // Shuffle the operands around so the lane index operand is in the
7420     // right place.
7421     unsigned Spacing;
7422     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7423     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7424     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7425     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7426     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7427                                             Spacing));
7428     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7429                                             Spacing * 2));
7430     TmpInst.addOperand(Inst.getOperand(1)); // lane
7431     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7432     TmpInst.addOperand(Inst.getOperand(5));
7433     Inst = TmpInst;
7434     return true;
7435   }
7436 
7437   case ARM::VST4LNdAsm_8:
7438   case ARM::VST4LNdAsm_16:
7439   case ARM::VST4LNdAsm_32:
7440   case ARM::VST4LNqAsm_16:
7441   case ARM::VST4LNqAsm_32: {
7442     MCInst TmpInst;
7443     // Shuffle the operands around so the lane index operand is in the
7444     // right place.
7445     unsigned Spacing;
7446     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7447     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7448     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7449     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7450     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7451                                             Spacing));
7452     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7453                                             Spacing * 2));
7454     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7455                                             Spacing * 3));
7456     TmpInst.addOperand(Inst.getOperand(1)); // lane
7457     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7458     TmpInst.addOperand(Inst.getOperand(5));
7459     Inst = TmpInst;
7460     return true;
7461   }
7462 
7463   // Handle NEON VLD complex aliases.
7464   case ARM::VLD1LNdWB_register_Asm_8:
7465   case ARM::VLD1LNdWB_register_Asm_16:
7466   case ARM::VLD1LNdWB_register_Asm_32: {
7467     MCInst TmpInst;
7468     // Shuffle the operands around so the lane index operand is in the
7469     // right place.
7470     unsigned Spacing;
7471     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7472     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7473     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7474     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7475     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7476     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7477     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7478     TmpInst.addOperand(Inst.getOperand(1)); // lane
7479     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7480     TmpInst.addOperand(Inst.getOperand(6));
7481     Inst = TmpInst;
7482     return true;
7483   }
7484 
7485   case ARM::VLD2LNdWB_register_Asm_8:
7486   case ARM::VLD2LNdWB_register_Asm_16:
7487   case ARM::VLD2LNdWB_register_Asm_32:
7488   case ARM::VLD2LNqWB_register_Asm_16:
7489   case ARM::VLD2LNqWB_register_Asm_32: {
7490     MCInst TmpInst;
7491     // Shuffle the operands around so the lane index operand is in the
7492     // right place.
7493     unsigned Spacing;
7494     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7495     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7496     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7497                                             Spacing));
7498     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7499     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7500     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7501     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7502     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7503     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7504                                             Spacing));
7505     TmpInst.addOperand(Inst.getOperand(1)); // lane
7506     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7507     TmpInst.addOperand(Inst.getOperand(6));
7508     Inst = TmpInst;
7509     return true;
7510   }
7511 
7512   case ARM::VLD3LNdWB_register_Asm_8:
7513   case ARM::VLD3LNdWB_register_Asm_16:
7514   case ARM::VLD3LNdWB_register_Asm_32:
7515   case ARM::VLD3LNqWB_register_Asm_16:
7516   case ARM::VLD3LNqWB_register_Asm_32: {
7517     MCInst TmpInst;
7518     // Shuffle the operands around so the lane index operand is in the
7519     // right place.
7520     unsigned Spacing;
7521     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7522     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7523     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7524                                             Spacing));
7525     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7526                                             Spacing * 2));
7527     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7528     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7529     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7530     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7531     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7532     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7533                                             Spacing));
7534     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7535                                             Spacing * 2));
7536     TmpInst.addOperand(Inst.getOperand(1)); // lane
7537     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7538     TmpInst.addOperand(Inst.getOperand(6));
7539     Inst = TmpInst;
7540     return true;
7541   }
7542 
  case ARM::VLD4LNdWB_register_Asm_8:
  case ARM::VLD4LNdWB_register_Asm_16:
  case ARM::VLD4LNdWB_register_Asm_32:
  case ARM::VLD4LNqWB_register_Asm_16:
  case ARM::VLD4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place. The pseudo carries only the first list register (Vd);
    // the other three are derived from Vd plus the D-register Spacing
    // reported by getRealVLDOpcode. The real instruction wants the full
    // destination list first, then writeback/address operands, then the
    // tied source list and the lane index.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }
7577 
  // VLD*LN single-lane loads with fixed-increment writeback. Each pseudo
  // (operands: Vd, lane, Rn, alignment, pred) is expanded to the real
  // instruction: extra list registers are derived from Vd plus the
  // D-register Spacing set by getRealVLDOpcode, and Rm is a zero register
  // to select the fixed (non-register) post-increment form.
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_fixed_Asm_8:
  case ARM::VLD3LNdWB_fixed_Asm_16:
  case ARM::VLD3LNdWB_fixed_Asm_32:
  case ARM::VLD3LNqWB_fixed_Asm_16:
  case ARM::VLD3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_fixed_Asm_8:
  case ARM::VLD4LNdWB_fixed_Asm_16:
  case ARM::VLD4LNdWB_fixed_Asm_32:
  case ARM::VLD4LNqWB_fixed_Asm_16:
  case ARM::VLD4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
7691 
  // VLD*LN single-lane loads without writeback: same expansion as the
  // writeback forms, minus the Rn_wb and Rm operands. Pseudo operand
  // layout is (Vd, lane, Rn, alignment, pred).
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
7797 
  // VLD3DUP single 3-element structure to all lanes instructions.
  // Pseudo operand layout is (Vd, Rn, alignment, pred); the second and
  // third list registers are derived from Vd plus the D-register Spacing
  // set by getRealVLDOpcode. No lane operand: the load replicates to all
  // lanes.
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
7868 
  // VLD3 multiple 3-element structure instructions.
  // Pseudo operand layout is (Vd, Rn, alignment, pred); the remaining
  // list registers come from Vd plus the Spacing set by getRealVLDOpcode.
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3dWB_fixed_Asm_8:
  case ARM::VLD3dWB_fixed_Asm_16:
  case ARM::VLD3dWB_fixed_Asm_32:
  case ARM::VLD3qWB_fixed_Asm_8:
  case ARM::VLD3qWB_fixed_Asm_16:
  case ARM::VLD3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3dWB_register_Asm_8:
  case ARM::VLD3dWB_register_Asm_16:
  case ARM::VLD3dWB_register_Asm_32:
  case ARM::VLD3qWB_register_Asm_8:
  case ARM::VLD3qWB_register_Asm_16:
  case ARM::VLD3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
7939 
  // VLD4DUP single 4-element structure to all lanes instructions.
  // (Same expansion pattern as VLD3DUP above, with one more list register.)
  case ARM::VLD4DUPdAsm_8:
  case ARM::VLD4DUPdAsm_16:
  case ARM::VLD4DUPdAsm_32:
  case ARM::VLD4DUPqAsm_8:
  case ARM::VLD4DUPqAsm_16:
  case ARM::VLD4DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4DUPdWB_fixed_Asm_8:
  case ARM::VLD4DUPdWB_fixed_Asm_16:
  case ARM::VLD4DUPdWB_fixed_Asm_32:
  case ARM::VLD4DUPqWB_fixed_Asm_8:
  case ARM::VLD4DUPqWB_fixed_Asm_16:
  case ARM::VLD4DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4DUPdWB_register_Asm_8:
  case ARM::VLD4DUPdWB_register_Asm_16:
  case ARM::VLD4DUPdWB_register_Asm_32:
  case ARM::VLD4DUPqWB_register_Asm_8:
  case ARM::VLD4DUPqWB_register_Asm_16:
  case ARM::VLD4DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
8016 
  // VLD4 multiple 4-element structure instructions.
  // (Same expansion pattern as VLD3 above, with one more list register.)
  case ARM::VLD4dAsm_8:
  case ARM::VLD4dAsm_16:
  case ARM::VLD4dAsm_32:
  case ARM::VLD4qAsm_8:
  case ARM::VLD4qAsm_16:
  case ARM::VLD4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4dWB_fixed_Asm_8:
  case ARM::VLD4dWB_fixed_Asm_16:
  case ARM::VLD4dWB_fixed_Asm_32:
  case ARM::VLD4qWB_fixed_Asm_8:
  case ARM::VLD4qWB_fixed_Asm_16:
  case ARM::VLD4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4dWB_register_Asm_8:
  case ARM::VLD4dWB_register_Asm_16:
  case ARM::VLD4dWB_register_Asm_32:
  case ARM::VLD4qWB_register_Asm_8:
  case ARM::VLD4qWB_register_Asm_16:
  case ARM::VLD4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
8093 
  // VST3 multiple 3-element structure instructions.
  // Stores list the address operands first, then the source register list
  // (the reverse of the VLD expansions above). Pseudo operand layout is
  // (Vd, Rn, alignment, [Rm,] pred).
  case ARM::VST3dAsm_8:
  case ARM::VST3dAsm_16:
  case ARM::VST3dAsm_32:
  case ARM::VST3qAsm_8:
  case ARM::VST3qAsm_16:
  case ARM::VST3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3dWB_fixed_Asm_8:
  case ARM::VST3dWB_fixed_Asm_16:
  case ARM::VST3dWB_fixed_Asm_32:
  case ARM::VST3qWB_fixed_Asm_8:
  case ARM::VST3qWB_fixed_Asm_16:
  case ARM::VST3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3dWB_register_Asm_8:
  case ARM::VST3dWB_register_Asm_16:
  case ARM::VST3dWB_register_Asm_32:
  case ARM::VST3qWB_register_Asm_8:
  case ARM::VST3qWB_register_Asm_16:
  case ARM::VST3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
8164 
  // VST4 multiple 4-element structure instructions.
  // (Same expansion pattern as VST3 above, with one more list register.)
  case ARM::VST4dAsm_8:
  case ARM::VST4dAsm_16:
  case ARM::VST4dAsm_32:
  case ARM::VST4qAsm_8:
  case ARM::VST4qAsm_16:
  case ARM::VST4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4dWB_fixed_Asm_8:
  case ARM::VST4dWB_fixed_Asm_16:
  case ARM::VST4dWB_fixed_Asm_32:
  case ARM::VST4qWB_fixed_Asm_8:
  case ARM::VST4qWB_fixed_Asm_16:
  case ARM::VST4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0 => fixed-increment)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4dWB_register_Asm_8:
  case ARM::VST4dWB_register_Asm_16:
  case ARM::VST4dWB_register_Asm_32:
  case ARM::VST4qWB_register_Asm_8:
  case ARM::VST4qWB_register_Asm_16:
  case ARM::VST4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
8241 
8242   // Handle encoding choice for the shift-immediate instructions.
8243   case ARM::t2LSLri:
8244   case ARM::t2LSRri:
8245   case ARM::t2ASRri:
8246     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8247         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8248         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8249         !HasWideQualifier) {
8250       unsigned NewOpc;
8251       switch (Inst.getOpcode()) {
8252       default: llvm_unreachable("unexpected opcode");
8253       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
8254       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
8255       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
8256       }
8257       // The Thumb1 operands aren't in the same order. Awesome, eh?
8258       MCInst TmpInst;
8259       TmpInst.setOpcode(NewOpc);
8260       TmpInst.addOperand(Inst.getOperand(0));
8261       TmpInst.addOperand(Inst.getOperand(5));
8262       TmpInst.addOperand(Inst.getOperand(1));
8263       TmpInst.addOperand(Inst.getOperand(2));
8264       TmpInst.addOperand(Inst.getOperand(3));
8265       TmpInst.addOperand(Inst.getOperand(4));
8266       Inst = TmpInst;
8267       return true;
8268     }
8269     return false;
8270 
8271   // Handle the Thumb2 mode MOV complex aliases.
8272   case ARM::t2MOVsr:
8273   case ARM::t2MOVSsr: {
8274     // Which instruction to expand to depends on the CCOut operand and
8275     // whether we're in an IT block if the register operands are low
8276     // registers.
8277     bool isNarrow = false;
8278     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8279         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8280         isARMLowRegister(Inst.getOperand(2).getReg()) &&
8281         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8282         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
8283         !HasWideQualifier)
8284       isNarrow = true;
8285     MCInst TmpInst;
8286     unsigned newOpc;
8287     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
8288     default: llvm_unreachable("unexpected opcode!");
8289     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
8290     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
8291     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
8292     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
8293     }
8294     TmpInst.setOpcode(newOpc);
8295     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8296     if (isNarrow)
8297       TmpInst.addOperand(MCOperand::createReg(
8298           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8299     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8300     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8301     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8302     TmpInst.addOperand(Inst.getOperand(5));
8303     if (!isNarrow)
8304       TmpInst.addOperand(MCOperand::createReg(
8305           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8306     Inst = TmpInst;
8307     return true;
8308   }
8309   case ARM::t2MOVsi:
8310   case ARM::t2MOVSsi: {
8311     // Which instruction to expand to depends on the CCOut operand and
8312     // whether we're in an IT block if the register operands are low
8313     // registers.
8314     bool isNarrow = false;
8315     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8316         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8317         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
8318         !HasWideQualifier)
8319       isNarrow = true;
8320     MCInst TmpInst;
8321     unsigned newOpc;
8322     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8323     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
8324     bool isMov = false;
8325     // MOV rd, rm, LSL #0 is actually a MOV instruction
8326     if (Shift == ARM_AM::lsl && Amount == 0) {
8327       isMov = true;
8328       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
8329       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
8330       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
8331       // instead.
8332       if (inITBlock()) {
8333         isNarrow = false;
8334       }
8335       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
8336     } else {
8337       switch(Shift) {
8338       default: llvm_unreachable("unexpected opcode!");
8339       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
8340       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
8341       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
8342       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
8343       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
8344       }
8345     }
8346     if (Amount == 32) Amount = 0;
8347     TmpInst.setOpcode(newOpc);
8348     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8349     if (isNarrow && !isMov)
8350       TmpInst.addOperand(MCOperand::createReg(
8351           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8352     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8353     if (newOpc != ARM::t2RRX && !isMov)
8354       TmpInst.addOperand(MCOperand::createImm(Amount));
8355     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8356     TmpInst.addOperand(Inst.getOperand(4));
8357     if (!isNarrow)
8358       TmpInst.addOperand(MCOperand::createReg(
8359           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8360     Inst = TmpInst;
8361     return true;
8362   }
8363   // Handle the ARM mode MOV complex aliases.
8364   case ARM::ASRr:
8365   case ARM::LSRr:
8366   case ARM::LSLr:
8367   case ARM::RORr: {
8368     ARM_AM::ShiftOpc ShiftTy;
8369     switch(Inst.getOpcode()) {
8370     default: llvm_unreachable("unexpected opcode!");
8371     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
8372     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
8373     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
8374     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
8375     }
8376     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
8377     MCInst TmpInst;
8378     TmpInst.setOpcode(ARM::MOVsr);
8379     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8380     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8381     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8382     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8383     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8384     TmpInst.addOperand(Inst.getOperand(4));
8385     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8386     Inst = TmpInst;
8387     return true;
8388   }
8389   case ARM::ASRi:
8390   case ARM::LSRi:
8391   case ARM::LSLi:
8392   case ARM::RORi: {
8393     ARM_AM::ShiftOpc ShiftTy;
8394     switch(Inst.getOpcode()) {
8395     default: llvm_unreachable("unexpected opcode!");
8396     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
8397     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
8398     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
8399     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
8400     }
8401     // A shift by zero is a plain MOVr, not a MOVsi.
8402     unsigned Amt = Inst.getOperand(2).getImm();
8403     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
8404     // A shift by 32 should be encoded as 0 when permitted
8405     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
8406       Amt = 0;
8407     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
8408     MCInst TmpInst;
8409     TmpInst.setOpcode(Opc);
8410     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8411     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8412     if (Opc == ARM::MOVsi)
8413       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8414     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8415     TmpInst.addOperand(Inst.getOperand(4));
8416     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8417     Inst = TmpInst;
8418     return true;
8419   }
8420   case ARM::RRXi: {
8421     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8422     MCInst TmpInst;
8423     TmpInst.setOpcode(ARM::MOVsi);
8424     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8425     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8426     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8427     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8428     TmpInst.addOperand(Inst.getOperand(3));
8429     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8430     Inst = TmpInst;
8431     return true;
8432   }
8433   case ARM::t2LDMIA_UPD: {
8434     // If this is a load of a single register, then we should use
8435     // a post-indexed LDR instruction instead, per the ARM ARM.
8436     if (Inst.getNumOperands() != 5)
8437       return false;
8438     MCInst TmpInst;
8439     TmpInst.setOpcode(ARM::t2LDR_POST);
8440     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8441     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8442     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8443     TmpInst.addOperand(MCOperand::createImm(4));
8444     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8445     TmpInst.addOperand(Inst.getOperand(3));
8446     Inst = TmpInst;
8447     return true;
8448   }
8449   case ARM::t2STMDB_UPD: {
8450     // If this is a store of a single register, then we should use
8451     // a pre-indexed STR instruction instead, per the ARM ARM.
8452     if (Inst.getNumOperands() != 5)
8453       return false;
8454     MCInst TmpInst;
8455     TmpInst.setOpcode(ARM::t2STR_PRE);
8456     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8457     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8458     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8459     TmpInst.addOperand(MCOperand::createImm(-4));
8460     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8461     TmpInst.addOperand(Inst.getOperand(3));
8462     Inst = TmpInst;
8463     return true;
8464   }
8465   case ARM::LDMIA_UPD:
8466     // If this is a load of a single register via a 'pop', then we should use
8467     // a post-indexed LDR instruction instead, per the ARM ARM.
8468     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8469         Inst.getNumOperands() == 5) {
8470       MCInst TmpInst;
8471       TmpInst.setOpcode(ARM::LDR_POST_IMM);
8472       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8473       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8474       TmpInst.addOperand(Inst.getOperand(1)); // Rn
8475       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
8476       TmpInst.addOperand(MCOperand::createImm(4));
8477       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8478       TmpInst.addOperand(Inst.getOperand(3));
8479       Inst = TmpInst;
8480       return true;
8481     }
8482     break;
8483   case ARM::STMDB_UPD:
8484     // If this is a store of a single register via a 'push', then we should use
8485     // a pre-indexed STR instruction instead, per the ARM ARM.
8486     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8487         Inst.getNumOperands() == 5) {
8488       MCInst TmpInst;
8489       TmpInst.setOpcode(ARM::STR_PRE_IMM);
8490       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8491       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8492       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8493       TmpInst.addOperand(MCOperand::createImm(-4));
8494       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8495       TmpInst.addOperand(Inst.getOperand(3));
8496       Inst = TmpInst;
8497     }
8498     break;
8499   case ARM::t2ADDri12:
8500     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8501     // mnemonic was used (not "addw"), encoding T3 is preferred.
8502     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8503         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8504       break;
8505     Inst.setOpcode(ARM::t2ADDri);
8506     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8507     break;
8508   case ARM::t2SUBri12:
8509     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8510     // mnemonic was used (not "subw"), encoding T3 is preferred.
8511     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8512         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8513       break;
8514     Inst.setOpcode(ARM::t2SUBri);
8515     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8516     break;
8517   case ARM::tADDi8:
8518     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8519     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8520     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8521     // to encoding T1 if <Rd> is omitted."
8522     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8523       Inst.setOpcode(ARM::tADDi3);
8524       return true;
8525     }
8526     break;
8527   case ARM::tSUBi8:
8528     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8529     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8530     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8531     // to encoding T1 if <Rd> is omitted."
8532     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8533       Inst.setOpcode(ARM::tSUBi3);
8534       return true;
8535     }
8536     break;
8537   case ARM::t2ADDri:
8538   case ARM::t2SUBri: {
8539     // If the destination and first source operand are the same, and
8540     // the flags are compatible with the current IT status, use encoding T2
8541     // instead of T3. For compatibility with the system 'as'. Make sure the
8542     // wide encoding wasn't explicit.
8543     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8544         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8545         (Inst.getOperand(2).isImm() &&
8546          (unsigned)Inst.getOperand(2).getImm() > 255) ||
8547         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
8548         HasWideQualifier)
8549       break;
8550     MCInst TmpInst;
8551     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8552                       ARM::tADDi8 : ARM::tSUBi8);
8553     TmpInst.addOperand(Inst.getOperand(0));
8554     TmpInst.addOperand(Inst.getOperand(5));
8555     TmpInst.addOperand(Inst.getOperand(0));
8556     TmpInst.addOperand(Inst.getOperand(2));
8557     TmpInst.addOperand(Inst.getOperand(3));
8558     TmpInst.addOperand(Inst.getOperand(4));
8559     Inst = TmpInst;
8560     return true;
8561   }
8562   case ARM::t2ADDrr: {
8563     // If the destination and first source operand are the same, and
8564     // there's no setting of the flags, use encoding T2 instead of T3.
8565     // Note that this is only for ADD, not SUB. This mirrors the system
8566     // 'as' behaviour.  Also take advantage of ADD being commutative.
8567     // Make sure the wide encoding wasn't explicit.
8568     bool Swap = false;
8569     auto DestReg = Inst.getOperand(0).getReg();
8570     bool Transform = DestReg == Inst.getOperand(1).getReg();
8571     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
8572       Transform = true;
8573       Swap = true;
8574     }
8575     if (!Transform ||
8576         Inst.getOperand(5).getReg() != 0 ||
8577         HasWideQualifier)
8578       break;
8579     MCInst TmpInst;
8580     TmpInst.setOpcode(ARM::tADDhirr);
8581     TmpInst.addOperand(Inst.getOperand(0));
8582     TmpInst.addOperand(Inst.getOperand(0));
8583     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
8584     TmpInst.addOperand(Inst.getOperand(3));
8585     TmpInst.addOperand(Inst.getOperand(4));
8586     Inst = TmpInst;
8587     return true;
8588   }
8589   case ARM::tADDrSP:
8590     // If the non-SP source operand and the destination operand are not the
8591     // same, we need to use the 32-bit encoding if it's available.
8592     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8593       Inst.setOpcode(ARM::t2ADDrr);
8594       Inst.addOperand(MCOperand::createReg(0)); // cc_out
8595       return true;
8596     }
8597     break;
8598   case ARM::tB:
8599     // A Thumb conditional branch outside of an IT block is a tBcc.
8600     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8601       Inst.setOpcode(ARM::tBcc);
8602       return true;
8603     }
8604     break;
8605   case ARM::t2B:
8606     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8607     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
8608       Inst.setOpcode(ARM::t2Bcc);
8609       return true;
8610     }
8611     break;
8612   case ARM::t2Bcc:
8613     // If the conditional is AL or we're in an IT block, we really want t2B.
8614     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8615       Inst.setOpcode(ARM::t2B);
8616       return true;
8617     }
8618     break;
8619   case ARM::tBcc:
8620     // If the conditional is AL, we really want tB.
8621     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8622       Inst.setOpcode(ARM::tB);
8623       return true;
8624     }
8625     break;
8626   case ARM::tLDMIA: {
8627     // If the register list contains any high registers, or if the writeback
8628     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8629     // instead if we're in Thumb2. Otherwise, this should have generated
8630     // an error in validateInstruction().
8631     unsigned Rn = Inst.getOperand(0).getReg();
8632     bool hasWritebackToken =
8633         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8634          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8635     bool listContainsBase;
8636     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8637         (!listContainsBase && !hasWritebackToken) ||
8638         (listContainsBase && hasWritebackToken)) {
8639       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8640       assert(isThumbTwo());
8641       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8642       // If we're switching to the updating version, we need to insert
8643       // the writeback tied operand.
8644       if (hasWritebackToken)
8645         Inst.insert(Inst.begin(),
8646                     MCOperand::createReg(Inst.getOperand(0).getReg()));
8647       return true;
8648     }
8649     break;
8650   }
8651   case ARM::tSTMIA_UPD: {
8652     // If the register list contains any high registers, we need to use
8653     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8654     // should have generated an error in validateInstruction().
8655     unsigned Rn = Inst.getOperand(0).getReg();
8656     bool listContainsBase;
8657     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8658       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8659       assert(isThumbTwo());
8660       Inst.setOpcode(ARM::t2STMIA_UPD);
8661       return true;
8662     }
8663     break;
8664   }
8665   case ARM::tPOP: {
8666     bool listContainsBase;
8667     // If the register list contains any high registers, we need to use
8668     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8669     // should have generated an error in validateInstruction().
8670     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8671       return false;
8672     assert(isThumbTwo());
8673     Inst.setOpcode(ARM::t2LDMIA_UPD);
8674     // Add the base register and writeback operands.
8675     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8676     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8677     return true;
8678   }
8679   case ARM::tPUSH: {
8680     bool listContainsBase;
8681     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8682       return false;
8683     assert(isThumbTwo());
8684     Inst.setOpcode(ARM::t2STMDB_UPD);
8685     // Add the base register and writeback operands.
8686     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8687     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8688     return true;
8689   }
8690   case ARM::t2MOVi:
8691     // If we can use the 16-bit encoding and the user didn't explicitly
8692     // request the 32-bit variant, transform it here.
8693     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8694         (Inst.getOperand(1).isImm() &&
8695          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
8696         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8697         !HasWideQualifier) {
8698       // The operands aren't in the same order for tMOVi8...
8699       MCInst TmpInst;
8700       TmpInst.setOpcode(ARM::tMOVi8);
8701       TmpInst.addOperand(Inst.getOperand(0));
8702       TmpInst.addOperand(Inst.getOperand(4));
8703       TmpInst.addOperand(Inst.getOperand(1));
8704       TmpInst.addOperand(Inst.getOperand(2));
8705       TmpInst.addOperand(Inst.getOperand(3));
8706       Inst = TmpInst;
8707       return true;
8708     }
8709     break;
8710 
8711   case ARM::t2MOVr:
8712     // If we can use the 16-bit encoding and the user didn't explicitly
8713     // request the 32-bit variant, transform it here.
8714     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8715         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8716         Inst.getOperand(2).getImm() == ARMCC::AL &&
8717         Inst.getOperand(4).getReg() == ARM::CPSR &&
8718         !HasWideQualifier) {
8719       // The operands aren't the same for tMOV[S]r... (no cc_out)
8720       MCInst TmpInst;
8721       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8722       TmpInst.addOperand(Inst.getOperand(0));
8723       TmpInst.addOperand(Inst.getOperand(1));
8724       TmpInst.addOperand(Inst.getOperand(2));
8725       TmpInst.addOperand(Inst.getOperand(3));
8726       Inst = TmpInst;
8727       return true;
8728     }
8729     break;
8730 
8731   case ARM::t2SXTH:
8732   case ARM::t2SXTB:
8733   case ARM::t2UXTH:
8734   case ARM::t2UXTB:
8735     // If we can use the 16-bit encoding and the user didn't explicitly
8736     // request the 32-bit variant, transform it here.
8737     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8738         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8739         Inst.getOperand(2).getImm() == 0 &&
8740         !HasWideQualifier) {
8741       unsigned NewOpc;
8742       switch (Inst.getOpcode()) {
8743       default: llvm_unreachable("Illegal opcode!");
8744       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8745       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8746       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8747       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8748       }
8749       // The operands aren't the same for thumb1 (no rotate operand).
8750       MCInst TmpInst;
8751       TmpInst.setOpcode(NewOpc);
8752       TmpInst.addOperand(Inst.getOperand(0));
8753       TmpInst.addOperand(Inst.getOperand(1));
8754       TmpInst.addOperand(Inst.getOperand(3));
8755       TmpInst.addOperand(Inst.getOperand(4));
8756       Inst = TmpInst;
8757       return true;
8758     }
8759     break;
8760 
8761   case ARM::MOVsi: {
8762     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8763     // rrx shifts and asr/lsr of #32 is encoded as 0
8764     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8765       return false;
8766     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8767       // Shifting by zero is accepted as a vanilla 'MOVr'
8768       MCInst TmpInst;
8769       TmpInst.setOpcode(ARM::MOVr);
8770       TmpInst.addOperand(Inst.getOperand(0));
8771       TmpInst.addOperand(Inst.getOperand(1));
8772       TmpInst.addOperand(Inst.getOperand(3));
8773       TmpInst.addOperand(Inst.getOperand(4));
8774       TmpInst.addOperand(Inst.getOperand(5));
8775       Inst = TmpInst;
8776       return true;
8777     }
8778     return false;
8779   }
8780   case ARM::ANDrsi:
8781   case ARM::ORRrsi:
8782   case ARM::EORrsi:
8783   case ARM::BICrsi:
8784   case ARM::SUBrsi:
8785   case ARM::ADDrsi: {
8786     unsigned newOpc;
8787     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8788     if (SOpc == ARM_AM::rrx) return false;
8789     switch (Inst.getOpcode()) {
8790     default: llvm_unreachable("unexpected opcode!");
8791     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8792     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8793     case ARM::EORrsi: newOpc = ARM::EORrr; break;
8794     case ARM::BICrsi: newOpc = ARM::BICrr; break;
8795     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
8796     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
8797     }
8798     // If the shift is by zero, use the non-shifted instruction definition.
8799     // The exception is for right shifts, where 0 == 32
8800     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
8801         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
8802       MCInst TmpInst;
8803       TmpInst.setOpcode(newOpc);
8804       TmpInst.addOperand(Inst.getOperand(0));
8805       TmpInst.addOperand(Inst.getOperand(1));
8806       TmpInst.addOperand(Inst.getOperand(2));
8807       TmpInst.addOperand(Inst.getOperand(4));
8808       TmpInst.addOperand(Inst.getOperand(5));
8809       TmpInst.addOperand(Inst.getOperand(6));
8810       Inst = TmpInst;
8811       return true;
8812     }
8813     return false;
8814   }
8815   case ARM::ITasm:
8816   case ARM::t2IT: {
8817     MCOperand &MO = Inst.getOperand(1);
8818     unsigned Mask = MO.getImm();
8819     ARMCC::CondCodes Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
8820 
8821     // Set up the IT block state according to the IT instruction we just
8822     // matched.
8823     assert(!inITBlock() && "nested IT blocks?!");
8824     startExplicitITBlock(Cond, Mask);
8825     MO.setImm(getITMaskEncoding());
8826     break;
8827   }
8828   case ARM::t2LSLrr:
8829   case ARM::t2LSRrr:
8830   case ARM::t2ASRrr:
8831   case ARM::t2SBCrr:
8832   case ARM::t2RORrr:
8833   case ARM::t2BICrr:
8834     // Assemblers should use the narrow encodings of these instructions when permissible.
8835     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8836          isARMLowRegister(Inst.getOperand(2).getReg())) &&
8837         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8838         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8839         !HasWideQualifier) {
8840       unsigned NewOpc;
8841       switch (Inst.getOpcode()) {
8842         default: llvm_unreachable("unexpected opcode");
8843         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
8844         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
8845         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
8846         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
8847         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
8848         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
8849       }
8850       MCInst TmpInst;
8851       TmpInst.setOpcode(NewOpc);
8852       TmpInst.addOperand(Inst.getOperand(0));
8853       TmpInst.addOperand(Inst.getOperand(5));
8854       TmpInst.addOperand(Inst.getOperand(1));
8855       TmpInst.addOperand(Inst.getOperand(2));
8856       TmpInst.addOperand(Inst.getOperand(3));
8857       TmpInst.addOperand(Inst.getOperand(4));
8858       Inst = TmpInst;
8859       return true;
8860     }
8861     return false;
8862 
8863   case ARM::t2ANDrr:
8864   case ARM::t2EORrr:
8865   case ARM::t2ADCrr:
8866   case ARM::t2ORRrr:
8867     // Assemblers should use the narrow encodings of these instructions when permissible.
8868     // These instructions are special in that they are commutable, so shorter encodings
8869     // are available more often.
8870     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8871          isARMLowRegister(Inst.getOperand(2).getReg())) &&
8872         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
8873          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
8874         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8875         !HasWideQualifier) {
8876       unsigned NewOpc;
8877       switch (Inst.getOpcode()) {
8878         default: llvm_unreachable("unexpected opcode");
8879         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
8880         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
8881         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
8882         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
8883       }
8884       MCInst TmpInst;
8885       TmpInst.setOpcode(NewOpc);
8886       TmpInst.addOperand(Inst.getOperand(0));
8887       TmpInst.addOperand(Inst.getOperand(5));
8888       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
8889         TmpInst.addOperand(Inst.getOperand(1));
8890         TmpInst.addOperand(Inst.getOperand(2));
8891       } else {
8892         TmpInst.addOperand(Inst.getOperand(2));
8893         TmpInst.addOperand(Inst.getOperand(1));
8894       }
8895       TmpInst.addOperand(Inst.getOperand(3));
8896       TmpInst.addOperand(Inst.getOperand(4));
8897       Inst = TmpInst;
8898       return true;
8899     }
8900     return false;
8901   }
8902   return false;
8903 }
8904 
8905 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8906   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
8907   // suffix depending on whether they're in an IT block or not.
8908   unsigned Opc = Inst.getOpcode();
8909   const MCInstrDesc &MCID = MII.get(Opc);
8910   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8911     assert(MCID.hasOptionalDef() &&
8912            "optionally flag setting instruction missing optional def operand");
8913     assert(MCID.NumOperands == Inst.getNumOperands() &&
8914            "operand count mismatch!");
8915     // Find the optional-def operand (cc_out).
8916     unsigned OpNo;
8917     for (OpNo = 0;
8918          !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
8919          ++OpNo)
8920       ;
8921     // If we're parsing Thumb1, reject it completely.
8922     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8923       return Match_RequiresFlagSetting;
8924     // If we're parsing Thumb2, which form is legal depends on whether we're
8925     // in an IT block.
8926     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8927         !inITBlock())
8928       return Match_RequiresITBlock;
8929     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8930         inITBlock())
8931       return Match_RequiresNotITBlock;
8932     // LSL with zero immediate is not allowed in an IT block
8933     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
8934       return Match_RequiresNotITBlock;
8935   } else if (isThumbOne()) {
8936     // Some high-register supporting Thumb1 encodings only allow both registers
8937     // to be from r0-r7 when in Thumb2.
8938     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
8939         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8940         isARMLowRegister(Inst.getOperand(2).getReg()))
8941       return Match_RequiresThumb2;
8942     // Others only require ARMv6 or later.
8943     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
8944              isARMLowRegister(Inst.getOperand(0).getReg()) &&
8945              isARMLowRegister(Inst.getOperand(1).getReg()))
8946       return Match_RequiresV6;
8947   }
8948 
8949   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
8950   // than the loop below can handle, so it uses the GPRnopc register class and
8951   // we do SP handling here.
8952   if (Opc == ARM::t2MOVr && !hasV8Ops())
8953   {
8954     // SP as both source and destination is not allowed
8955     if (Inst.getOperand(0).getReg() == ARM::SP &&
8956         Inst.getOperand(1).getReg() == ARM::SP)
8957       return Match_RequiresV8;
8958     // When flags-setting SP as either source or destination is not allowed
8959     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
8960         (Inst.getOperand(0).getReg() == ARM::SP ||
8961          Inst.getOperand(1).getReg() == ARM::SP))
8962       return Match_RequiresV8;
8963   }
8964 
8965   // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
8966   // ARMv8-A.
8967   if ((Inst.getOpcode() == ARM::VMRS || Inst.getOpcode() == ARM::VMSR) &&
8968       Inst.getOperand(0).getReg() == ARM::SP && (isThumb() && !hasV8Ops()))
8969     return Match_InvalidOperand;
8970 
8971   for (unsigned I = 0; I < MCID.NumOperands; ++I)
8972     if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
8973       // rGPRRegClass excludes PC, and also excluded SP before ARMv8
8974       if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
8975         return Match_RequiresV8;
8976       else if (Inst.getOperand(I).getReg() == ARM::PC)
8977         return Match_InvalidOperand;
8978     }
8979 
8980   return Match_Success;
8981 }
8982 
namespace llvm {

// Specialization used while parsing assembly: without liveness information we
// simply report CPSR as dead, so no conservative diagnostics are produced.
template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
  return true; // In an assembly source, no need to second-guess
}

} // end namespace llvm
8990 
8991 // Returns true if Inst is unpredictable if it is in and IT block, but is not
8992 // the last instruction in the block.
8993 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
8994   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
8995 
8996   // All branch & call instructions terminate IT blocks with the exception of
8997   // SVC.
8998   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
8999       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
9000     return true;
9001 
9002   // Any arithmetic instruction which writes to the PC also terminates the IT
9003   // block.
9004   for (unsigned OpIdx = 0; OpIdx < MCID.getNumDefs(); ++OpIdx) {
9005     MCOperand &Op = Inst.getOperand(OpIdx);
9006     if (Op.isReg() && Op.getReg() == ARM::PC)
9007       return true;
9008   }
9009 
9010   if (MCID.hasImplicitDefOfPhysReg(ARM::PC, MRI))
9011     return true;
9012 
9013   // Instructions with variable operand lists, which write to the variable
9014   // operands. We only care about Thumb instructions here, as ARM instructions
9015   // obviously can't be in an IT block.
9016   switch (Inst.getOpcode()) {
9017   case ARM::tLDMIA:
9018   case ARM::t2LDMIA:
9019   case ARM::t2LDMIA_UPD:
9020   case ARM::t2LDMDB:
9021   case ARM::t2LDMDB_UPD:
9022     if (listContainsReg(Inst, 3, ARM::PC))
9023       return true;
9024     break;
9025   case ARM::tPOP:
9026     if (listContainsReg(Inst, 2, ARM::PC))
9027       return true;
9028     break;
9029   }
9030 
9031   return false;
9032 }
9033 
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
                                          SmallVectorImpl<NearMissInfo> &NearMisses,
                                          bool MatchingInlineAsm,
                                          bool &EmitInITBlock,
                                          MCStreamer &Out) {
  // Match one instruction, handling implicit IT blocks. The instruction may
  // match as an extension of the current implicit IT block, outside any IT
  // block, or as the first instruction of a fresh implicit IT block.
  // EmitInITBlock tells the caller whether the instruction must be queued
  // into the pending implicit IT block rather than emitted directly.

  // If we can't use an implicit IT block here, just match as normal.
  if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
            Match_Success) {
      // The match succeeded, but we still have to check that the instruction
      // is valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
            (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
          // The opposite condition can still be encoded by flipping this
          // slot's bit in the IT block's condition mask.
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // Undo the speculative extension of the IT block.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ITState.Cond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}
9118 
9119 static std::string ARMMnemonicSpellCheck(StringRef S, uint64_t FBS,
9120                                          unsigned VariantID = 0);
9121 
9122 static const char *getSubtargetFeatureName(uint64_t Val);
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out, uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  // Set by MatchInstruction() when the instruction must be queued into the
  // current implicit IT block instead of being emitted immediately.
  bool PendConditionalInstruction = false;

  SmallVector<NearMissInfo, 4> NearMisses;
  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 PendConditionalInstruction, Out);

  switch (MatchResult) {
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    { // processInstruction() updates inITBlock state, we need to save it away
      bool wasInITBlock = inITBlock();

      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, Out))
        ;

      // Only after the instruction is fully processed, we can validate it
      if (wasInITBlock && hasV8Ops() && isThumb() &&
          !isV8EligibleForIT(&Inst)) {
        Warning(IDLoc, "deprecated instruction in IT block");
      }
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Queue into the pending implicit IT block; flush once the block is
      // full or this instruction has to be the last one in it.
      PendingConditionalInsts.push_back(Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.EmitInstruction(Inst, getSTI());
    }
    return false;
  case Match_NearMisses:
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: suggest a spelling correction, restricted to the
    // mnemonics available with the current subtarget features.
    uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
      ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
9196 
/// parseDirective parses the arm specific directives
/// Returns false if the directive was handled (even if its parser reported an
/// error), true if it is unknown and should be handled generically.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCObjectFileInfo::Environment Format =
    getContext().getObjectFileInfo()->getObjectFileType();
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;

  // Dispatch on the directive name. Note most branches ignore the parse
  // helper's return value: errors are reported inside the helpers and the
  // directive is still considered "handled".
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    parseLiteralValues(4, DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(2, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(DirectiveID.getLoc(), false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(DirectiveID.getLoc(), true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(DirectiveID.getLoc());
  else if (!IsMachO && !IsCOFF) {
    // These directives are only meaningful for ELF output.
    if (IDVal == ".arch")
      parseDirectiveArch(DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(DirectiveID.getLoc());
    else if (IDVal == ".inst")
      parseDirectiveInst(DirectiveID.getLoc());
    else if (IDVal == ".inst.n")
      parseDirectiveInst(DirectiveID.getLoc(), 'n');
    else if (IDVal == ".inst.w")
      parseDirectiveInst(DirectiveID.getLoc(), 'w');
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(DirectiveID.getLoc());
    else
      return true;
  } else
    return true;
  return false;
}
9280 
9281 /// parseLiteralValues
9282 ///  ::= .hword expression [, expression]*
9283 ///  ::= .short expression [, expression]*
9284 ///  ::= .word expression [, expression]*
9285 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
9286   auto parseOne = [&]() -> bool {
9287     const MCExpr *Value;
9288     if (getParser().parseExpression(Value))
9289       return true;
9290     getParser().getStreamer().EmitValue(Value, Size, L);
9291     return false;
9292   };
9293   return (parseMany(parseOne));
9294 }
9295 
9296 /// parseDirectiveThumb
9297 ///  ::= .thumb
9298 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
9299   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
9300       check(!hasThumb(), L, "target does not support Thumb mode"))
9301     return true;
9302 
9303   if (!isThumb())
9304     SwitchMode();
9305 
9306   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9307   return false;
9308 }
9309 
9310 /// parseDirectiveARM
9311 ///  ::= .arm
9312 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
9313   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
9314       check(!hasARM(), L, "target does not support ARM mode"))
9315     return true;
9316 
9317   if (isThumb())
9318     SwitchMode();
9319   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9320   return false;
9321 }
9322 
9323 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
9324   // We need to flush the current implicit IT block on a label, because it is
9325   // not legal to branch into an IT block.
9326   flushPendingInstructions(getStreamer());
9327   if (NextSymbolIsThumb) {
9328     getParser().getStreamer().EmitThumbFunc(Symbol);
9329     NextSymbolIsThumb = false;
9330   }
9331 }
9332 
9333 /// parseDirectiveThumbFunc
9334 ///  ::= .thumbfunc symbol_name
9335 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
9336   MCAsmParser &Parser = getParser();
9337   const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
9338   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9339 
9340   // Darwin asm has (optionally) function name after .thumb_func direction
9341   // ELF doesn't
9342 
9343   if (IsMachO) {
9344     if (Parser.getTok().is(AsmToken::Identifier) ||
9345         Parser.getTok().is(AsmToken::String)) {
9346       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
9347           Parser.getTok().getIdentifier());
9348       getParser().getStreamer().EmitThumbFunc(Func);
9349       Parser.Lex();
9350       if (parseToken(AsmToken::EndOfStatement,
9351                      "unexpected token in '.thumb_func' directive"))
9352         return true;
9353       return false;
9354     }
9355   }
9356 
9357   if (parseToken(AsmToken::EndOfStatement,
9358                  "unexpected token in '.thumb_func' directive"))
9359     return true;
9360 
9361   NextSymbolIsThumb = true;
9362   return false;
9363 }
9364 
9365 /// parseDirectiveSyntax
9366 ///  ::= .syntax unified | divided
9367 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
9368   MCAsmParser &Parser = getParser();
9369   const AsmToken &Tok = Parser.getTok();
9370   if (Tok.isNot(AsmToken::Identifier)) {
9371     Error(L, "unexpected token in .syntax directive");
9372     return false;
9373   }
9374 
9375   StringRef Mode = Tok.getString();
9376   Parser.Lex();
9377   if (check(Mode == "divided" || Mode == "DIVIDED", L,
9378             "'.syntax divided' arm assembly not supported") ||
9379       check(Mode != "unified" && Mode != "UNIFIED", L,
9380             "unrecognized syntax mode in .syntax directive") ||
9381       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9382     return true;
9383 
9384   // TODO tell the MC streamer the mode
9385   // getParser().getStreamer().Emit???();
9386   return false;
9387 }
9388 
9389 /// parseDirectiveCode
9390 ///  ::= .code 16 | 32
9391 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
9392   MCAsmParser &Parser = getParser();
9393   const AsmToken &Tok = Parser.getTok();
9394   if (Tok.isNot(AsmToken::Integer))
9395     return Error(L, "unexpected token in .code directive");
9396   int64_t Val = Parser.getTok().getIntVal();
9397   if (Val != 16 && Val != 32) {
9398     Error(L, "invalid operand to .code directive");
9399     return false;
9400   }
9401   Parser.Lex();
9402 
9403   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9404     return true;
9405 
9406   if (Val == 16) {
9407     if (!hasThumb())
9408       return Error(L, "target does not support Thumb mode");
9409 
9410     if (!isThumb())
9411       SwitchMode();
9412     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9413   } else {
9414     if (!hasARM())
9415       return Error(L, "target does not support ARM mode");
9416 
9417     if (isThumb())
9418       SwitchMode();
9419     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9420   }
9421 
9422   return false;
9423 }
9424 
9425 /// parseDirectiveReq
9426 ///  ::= name .req registername
9427 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
9428   MCAsmParser &Parser = getParser();
9429   Parser.Lex(); // Eat the '.req' token.
9430   unsigned Reg;
9431   SMLoc SRegLoc, ERegLoc;
9432   if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
9433             "register name expected") ||
9434       parseToken(AsmToken::EndOfStatement,
9435                  "unexpected input in .req directive."))
9436     return true;
9437 
9438   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
9439     return Error(SRegLoc,
9440                  "redefinition of '" + Name + "' does not match original.");
9441 
9442   return false;
9443 }
9444 
9445 /// parseDirectiveUneq
9446 ///  ::= .unreq registername
9447 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9448   MCAsmParser &Parser = getParser();
9449   if (Parser.getTok().isNot(AsmToken::Identifier))
9450     return Error(L, "unexpected input in .unreq directive.");
9451   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9452   Parser.Lex(); // Eat the identifier.
9453   if (parseToken(AsmToken::EndOfStatement,
9454                  "unexpected input in '.unreq' directive"))
9455     return true;
9456   return false;
9457 }
9458 
9459 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
9460 // before, if supported by the new target, or emit mapping symbols for the mode
9461 // switch.
9462 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
9463   if (WasThumb != isThumb()) {
9464     if (WasThumb && hasThumb()) {
9465       // Stay in Thumb mode
9466       SwitchMode();
9467     } else if (!WasThumb && hasARM()) {
9468       // Stay in ARM mode
9469       SwitchMode();
9470     } else {
9471       // Mode switch forced, because the new arch doesn't support the old mode.
9472       getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
9473                                                             : MCAF_Code32);
9474       // Warn about the implcit mode switch. GAS does not switch modes here,
9475       // but instead stays in the old mode, reporting an error on any following
9476       // instructions as the mode does not exist on the target.
9477       Warning(Loc, Twine("new target does not support ") +
9478                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
9479                        (!WasThumb ? "thumb" : "arm") + " mode");
9480     }
9481   }
9482 }
9483 
9484 /// parseDirectiveArch
9485 ///  ::= .arch token
9486 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9487   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9488   ARM::ArchKind ID = ARM::parseArch(Arch);
9489 
9490   if (ID == ARM::ArchKind::INVALID)
9491     return Error(L, "Unknown arch name");
9492 
9493   bool WasThumb = isThumb();
9494   Triple T;
9495   MCSubtargetInfo &STI = copySTI();
9496   STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
9497   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9498   FixModeAfterArchChange(WasThumb, L);
9499 
9500   getTargetStreamer().emitArch(ID);
9501   return false;
9502 }
9503 
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int [, "str"]
///  ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  // The tag is either a symbolic Tag_* name or a numeric constant expression.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    StringRef Name = Parser.getTok().getIdentifier();
    Tag = ARMBuildAttrs::AttrTypeFromString(Name);
    if (Tag == -1) {
      // NOTE(review): the error is reported without being propagated
      // (returns false) — matches the behaviour of the other directives here.
      Error(TagLoc, "attribute name not recognised: " + Name);
      return false;
    }
    Parser.Lex();
  } else {
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Decide the expected payload from the tag: CPU names take a string,
  // Tag_compatibility takes both an integer and a string, and otherwise the
  // AEABI convention applies (tags < 32 and even tags are integers, odd tags
  // >= 32 are strings).
  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  // Tag_compatibility has a comma between the integer and string parts.
  if (Tag == ARMBuildAttrs::compatibility) {
    if (Parser.parseToken(AsmToken::Comma, "comma expected"))
      return true;
  }

  if (IsStringValue) {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "bad string constant");

    StringValue = Parser.getTok().getStringContents();
    Parser.Lex();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement,
                        "unexpected token in '.eabi_attribute' directive"))
    return true;

  // Emit via the streamer entry point matching the payload shape.
  if (IsIntegerValue && IsStringValue) {
    assert(Tag == ARMBuildAttrs::compatibility);
    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Tag, StringValue);
  return false;
}
9593 
9594 /// parseDirectiveCPU
9595 ///  ::= .cpu str
9596 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9597   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9598   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9599 
9600   // FIXME: This is using table-gen data, but should be moved to
9601   // ARMTargetParser once that is table-gen'd.
9602   if (!getSTI().isCPUStringValid(CPU))
9603     return Error(L, "Unknown CPU name");
9604 
9605   bool WasThumb = isThumb();
9606   MCSubtargetInfo &STI = copySTI();
9607   STI.setDefaultFeatures(CPU, "");
9608   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9609   FixModeAfterArchChange(WasThumb, L);
9610 
9611   return false;
9612 }
9613 
9614 /// parseDirectiveFPU
9615 ///  ::= .fpu str
9616 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9617   SMLoc FPUNameLoc = getTok().getLoc();
9618   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9619 
9620   unsigned ID = ARM::parseFPU(FPU);
9621   std::vector<StringRef> Features;
9622   if (!ARM::getFPUFeatures(ID, Features))
9623     return Error(FPUNameLoc, "Unknown FPU name");
9624 
9625   MCSubtargetInfo &STI = copySTI();
9626   for (auto Feature : Features)
9627     STI.ApplyFeatureFlag(Feature);
9628   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9629 
9630   getTargetStreamer().emitFPU(ID);
9631   return false;
9632 }
9633 
9634 /// parseDirectiveFnStart
9635 ///  ::= .fnstart
9636 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9637   if (parseToken(AsmToken::EndOfStatement,
9638                  "unexpected token in '.fnstart' directive"))
9639     return true;
9640 
9641   if (UC.hasFnStart()) {
9642     Error(L, ".fnstart starts before the end of previous one");
9643     UC.emitFnStartLocNotes();
9644     return true;
9645   }
9646 
9647   // Reset the unwind directives parser state
9648   UC.reset();
9649 
9650   getTargetStreamer().emitFnStart();
9651 
9652   UC.recordFnStart(L);
9653   return false;
9654 }
9655 
9656 /// parseDirectiveFnEnd
9657 ///  ::= .fnend
9658 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9659   if (parseToken(AsmToken::EndOfStatement,
9660                  "unexpected token in '.fnend' directive"))
9661     return true;
9662   // Check the ordering of unwind directives
9663   if (!UC.hasFnStart())
9664     return Error(L, ".fnstart must precede .fnend directive");
9665 
9666   // Reset the unwind directives parser state
9667   getTargetStreamer().emitFnEnd();
9668 
9669   UC.reset();
9670   return false;
9671 }
9672 
9673 /// parseDirectiveCantUnwind
9674 ///  ::= .cantunwind
9675 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
9676   if (parseToken(AsmToken::EndOfStatement,
9677                  "unexpected token in '.cantunwind' directive"))
9678     return true;
9679 
9680   UC.recordCantUnwind(L);
9681   // Check the ordering of unwind directives
9682   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
9683     return true;
9684 
9685   if (UC.hasHandlerData()) {
9686     Error(L, ".cantunwind can't be used with .handlerdata directive");
9687     UC.emitHandlerDataLocNotes();
9688     return true;
9689   }
9690   if (UC.hasPersonality()) {
9691     Error(L, ".cantunwind can't be used with .personality directive");
9692     UC.emitPersonalityLocNotes();
9693     return true;
9694   }
9695 
9696   getTargetStreamer().emitCantUnwind();
9697   return false;
9698 }
9699 
9700 /// parseDirectivePersonality
9701 ///  ::= .personality name
9702 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
9703   MCAsmParser &Parser = getParser();
9704   bool HasExistingPersonality = UC.hasPersonality();
9705 
9706   // Parse the name of the personality routine
9707   if (Parser.getTok().isNot(AsmToken::Identifier))
9708     return Error(L, "unexpected input in .personality directive.");
9709   StringRef Name(Parser.getTok().getIdentifier());
9710   Parser.Lex();
9711 
9712   if (parseToken(AsmToken::EndOfStatement,
9713                  "unexpected token in '.personality' directive"))
9714     return true;
9715 
9716   UC.recordPersonality(L);
9717 
9718   // Check the ordering of unwind directives
9719   if (!UC.hasFnStart())
9720     return Error(L, ".fnstart must precede .personality directive");
9721   if (UC.cantUnwind()) {
9722     Error(L, ".personality can't be used with .cantunwind directive");
9723     UC.emitCantUnwindLocNotes();
9724     return true;
9725   }
9726   if (UC.hasHandlerData()) {
9727     Error(L, ".personality must precede .handlerdata directive");
9728     UC.emitHandlerDataLocNotes();
9729     return true;
9730   }
9731   if (HasExistingPersonality) {
9732     Error(L, "multiple personality directives");
9733     UC.emitPersonalityLocNotes();
9734     return true;
9735   }
9736 
9737   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
9738   getTargetStreamer().emitPersonality(PR);
9739   return false;
9740 }
9741 
9742 /// parseDirectiveHandlerData
9743 ///  ::= .handlerdata
9744 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9745   if (parseToken(AsmToken::EndOfStatement,
9746                  "unexpected token in '.handlerdata' directive"))
9747     return true;
9748 
9749   UC.recordHandlerData(L);
9750   // Check the ordering of unwind directives
9751   if (!UC.hasFnStart())
9752     return Error(L, ".fnstart must precede .personality directive");
9753   if (UC.cantUnwind()) {
9754     Error(L, ".handlerdata can't be used with .cantunwind directive");
9755     UC.emitCantUnwindLocNotes();
9756     return true;
9757   }
9758 
9759   getTargetStreamer().emitHandlerData();
9760   return false;
9761 }
9762 
9763 /// parseDirectiveSetFP
9764 ///  ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives: .setfp is only valid between
  // .fnstart and .handlerdata.
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  int FPReg = tryParseRegister();

  if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
      Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  // Parse spreg. The source register must be either sp or the frame pointer
  // established by a previous .setfp (UC.getFPReg()).
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister();
  if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register. This must happen only after the
  // SPReg check above, which validates against the *previous* FP register.
  UC.saveFPReg(FPReg);

  // Parse offset. Both '#' and '$' are accepted as the immediate introducer.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    // The offset must fold to a compile-time constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement))
    return true;

  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
                                static_cast<unsigned>(SPReg), Offset);
  return false;
}
9818 
9819 /// parseDirective
9820 ///  ::= .pad offset
9821 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
9822   MCAsmParser &Parser = getParser();
9823   // Check the ordering of unwind directives
9824   if (!UC.hasFnStart())
9825     return Error(L, ".fnstart must precede .pad directive");
9826   if (UC.hasHandlerData())
9827     return Error(L, ".pad must precede .handlerdata directive");
9828 
9829   // Parse the offset
9830   if (Parser.getTok().isNot(AsmToken::Hash) &&
9831       Parser.getTok().isNot(AsmToken::Dollar))
9832     return Error(Parser.getTok().getLoc(), "'#' expected");
9833   Parser.Lex(); // skip hash token.
9834 
9835   const MCExpr *OffsetExpr;
9836   SMLoc ExLoc = Parser.getTok().getLoc();
9837   SMLoc EndLoc;
9838   if (getParser().parseExpression(OffsetExpr, EndLoc))
9839     return Error(ExLoc, "malformed pad offset");
9840   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9841   if (!CE)
9842     return Error(ExLoc, "pad offset must be an immediate");
9843 
9844   if (parseToken(AsmToken::EndOfStatement,
9845                  "unexpected token in '.pad' directive"))
9846     return true;
9847 
9848   getTargetStreamer().emitPad(CE->getValue());
9849   return false;
9850 }
9851 
9852 /// parseDirectiveRegSave
9853 ///  ::= .save  { registers }
9854 ///  ::= .vsave { registers }
bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
  // Check the ordering of unwind directives: register saves are only valid
  // between .fnstart and .handlerdata.
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .save or .vsave directives");
  if (UC.hasHandlerData())
    return Error(L, ".save or .vsave must precede .handlerdata directive");

  // RAII object to make sure parsed operands are deleted.
  SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;

  // Parse the register list; .save takes a GPR list, .vsave a DPR list
  // (selected by the IsVector flag).
  if (parseRegisterList(Operands) ||
      parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  ARMOperand &Op = (ARMOperand &)*Operands[0];
  if (!IsVector && !Op.isRegList())
    return Error(L, ".save expects GPR registers");
  if (IsVector && !Op.isDPRRegList())
    return Error(L, ".vsave expects DPR registers");

  getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
  return false;
}
9878 
9879 /// parseDirectiveInst
9880 ///  ::= .inst opcode [, ...]
9881 ///  ::= .inst.n opcode [, ...]
9882 ///  ::= .inst.w opcode [, ...]
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  int Width = 4;

  // In Thumb mode the encoding width (2 or 4 bytes) is ambiguous, so a
  // suffix is mandatory; in ARM mode every instruction is 4 bytes and no
  // suffix is permitted.
  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      return Error(Loc, "cannot determine Thumb instruction size, "
                        "use inst.n/inst.w instead");
    }
  } else {
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  // Parse and emit one comma-separated opcode operand, validating that the
  // constant fits in the selected encoding width.
  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    switch (Width) {
    case 2:
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Value->getValue(), Suffix);
    return false;
  };

  // At least one opcode operand is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}
9935 
9936 /// parseDirectiveLtorg
9937 ///  ::= .ltorg | .pool
9938 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
9939   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9940     return true;
9941   getTargetStreamer().emitCurrentConstantPool();
9942   return false;
9943 }
9944 
bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
  const MCSection *Section = getStreamer().getCurrentSectionOnly();

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;

  // If no section has been set up yet, initialize the default sections so
  // there is something to align within.
  if (!Section) {
    getStreamer().InitSections(false);
    Section = getStreamer().getCurrentSectionOnly();
  }

  assert(Section && "must have section to emit alignment");
  // Code sections get 2-byte code alignment; other sections get 2-byte
  // value alignment (zero fill).
  if (Section->UseCodeAlign())
    getStreamer().EmitCodeAlignment(2);
  else
    getStreamer().EmitValueToAlignment(2);

  return false;
}
9964 
9965 /// parseDirectivePersonalityIndex
9966 ///   ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Capture this before recording below, so that an earlier personality
  // directive is detected as a duplicate.
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) ||
      parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.personalityindex' directive")) {
    return true;
  }

  // Record the location before the ordering checks so later diagnostics can
  // point back at this directive.
  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must be a constant within the EHABI-defined range.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
10010 
10011 /// parseDirectiveUnwindRaw
10012 ///   ::= .unwind_raw offset, opcode [, opcode...]
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  // .unwind_raw is only valid inside a .fnstart region.
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(OffsetExpr))
    return Error(OffsetLoc, "expected expression");

  // The stack offset must fold to a compile-time constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(OffsetLoc, "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseToken(AsmToken::Comma, "expected comma"))
    return true;

  SmallVector<uint8_t, 16> Opcodes;

  // Parse one raw unwind opcode: a constant that must fit in a byte.
  auto parseOne = [&]() -> bool {
    const MCExpr *OE;
    SMLoc OpcodeLoc = getLexer().getLoc();
    if (check(getLexer().is(AsmToken::EndOfStatement) ||
                  Parser.parseExpression(OE),
              OpcodeLoc, "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
    if (!OC)
      return Error(OpcodeLoc, "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    // Reject anything with bits set outside the low byte.
    if (Opcode & ~0xff)
      return Error(OpcodeLoc, "invalid opcode");
    Opcodes.push_back(uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(OpcodeLoc, "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
10062 
10063 /// parseDirectiveTLSDescSeq
10064 ///   ::= .tlsdescseq tls-variable
10065 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
10066   MCAsmParser &Parser = getParser();
10067 
10068   if (getLexer().isNot(AsmToken::Identifier))
10069     return TokError("expected variable after '.tlsdescseq' directive");
10070 
10071   const MCSymbolRefExpr *SRE =
10072     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
10073                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10074   Lex();
10075 
10076   if (parseToken(AsmToken::EndOfStatement,
10077                  "unexpected token in '.tlsdescseq' directive"))
10078     return true;
10079 
10080   getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10081   return false;
10082 }
10083 
10084 /// parseDirectiveMovSP
10085 ///  ::= .movsp reg [, #offset]
10086 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10087   MCAsmParser &Parser = getParser();
10088   if (!UC.hasFnStart())
10089     return Error(L, ".fnstart must precede .movsp directives");
10090   if (UC.getFPReg() != ARM::SP)
10091     return Error(L, "unexpected .movsp directive");
10092 
10093   SMLoc SPRegLoc = Parser.getTok().getLoc();
10094   int SPReg = tryParseRegister();
10095   if (SPReg == -1)
10096     return Error(SPRegLoc, "register expected");
10097   if (SPReg == ARM::SP || SPReg == ARM::PC)
10098     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10099 
10100   int64_t Offset = 0;
10101   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10102     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
10103       return true;
10104 
10105     const MCExpr *OffsetExpr;
10106     SMLoc OffsetLoc = Parser.getTok().getLoc();
10107 
10108     if (Parser.parseExpression(OffsetExpr))
10109       return Error(OffsetLoc, "malformed offset expression");
10110 
10111     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10112     if (!CE)
10113       return Error(OffsetLoc, "offset must be an immediate constant");
10114 
10115     Offset = CE->getValue();
10116   }
10117 
10118   if (parseToken(AsmToken::EndOfStatement,
10119                  "unexpected token in '.movsp' directive"))
10120     return true;
10121 
10122   getTargetStreamer().emitMovSP(SPReg, Offset);
10123   UC.saveFPReg(SPReg);
10124 
10125   return false;
10126 }
10127 
10128 /// parseDirectiveObjectArch
10129 ///   ::= .object_arch name
10130 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10131   MCAsmParser &Parser = getParser();
10132   if (getLexer().isNot(AsmToken::Identifier))
10133     return Error(getLexer().getLoc(), "unexpected token");
10134 
10135   StringRef Arch = Parser.getTok().getString();
10136   SMLoc ArchLoc = Parser.getTok().getLoc();
10137   Lex();
10138 
10139   ARM::ArchKind ID = ARM::parseArch(Arch);
10140 
10141   if (ID == ARM::ArchKind::INVALID)
10142     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
10143   if (parseToken(AsmToken::EndOfStatement))
10144     return true;
10145 
10146   getTargetStreamer().emitObjectArch(ID);
10147   return false;
10148 }
10149 
10150 /// parseDirectiveAlign
10151 ///   ::= .align
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the target
  // agnostic handling for this directive which will correctly handle this.
  if (parseOptionalToken(AsmToken::EndOfStatement)) {
    // '.align' is target specifically handled to mean 2**2 byte alignment.
    const MCSection *Section = getStreamer().getCurrentSectionOnly();
    assert(Section && "must have section to emit alignment");
    // Code sections get 4-byte code alignment; others get 4-byte value
    // alignment with zero fill.
    if (Section->UseCodeAlign())
      getStreamer().EmitCodeAlignment(4, 0);
    else
      getStreamer().EmitValueToAlignment(4, 0, 1, 0);
    return false;
  }
  // An operand was present: return true so the generic '.align' parser
  // handles it instead (see NOTE above).
  return true;
}
10167 
10168 /// parseDirectiveThumbSet
10169 ///  ::= .thumb_set name, value
10170 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
10171   MCAsmParser &Parser = getParser();
10172 
10173   StringRef Name;
10174   if (check(Parser.parseIdentifier(Name),
10175             "expected identifier after '.thumb_set'") ||
10176       parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
10177     return true;
10178 
10179   MCSymbol *Sym;
10180   const MCExpr *Value;
10181   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
10182                                                Parser, Sym, Value))
10183     return true;
10184 
10185   getTargetStreamer().emitThumbSet(Sym, Value);
10186   return false;
10187 }
10188 
10189 /// Force static initialization.
10190 extern "C" void LLVMInitializeARMAsmParser() {
10191   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
10192   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
10193   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
10194   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
10195 }
10196 
10197 #define GET_REGISTER_MATCHER
10198 #define GET_SUBTARGET_FEATURE_NAME
10199 #define GET_MATCHER_IMPLEMENTATION
10200 #define GET_MNEMONIC_SPELL_CHECKER
10201 #include "ARMGenAsmMatcher.inc"
10202 
10203 // Some diagnostics need to vary with subtarget features, so they are handled
10204 // here. For example, the DPR class has either 16 or 32 registers, depending
10205 // on the FPU available.
10206 const char *
10207 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
10208   switch (MatchError) {
10209   // rGPR contains sp starting with ARMv8.
10210   case Match_rGPR:
10211     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
10212                       : "operand must be a register in range [r0, r12] or r14";
10213   // DPR contains 16 registers for some FPUs, and 32 for others.
10214   case Match_DPR:
10215     return hasD16() ? "operand must be a register in range [d0, d15]"
10216                     : "operand must be a register in range [d0, d31]";
10217   case Match_DPR_RegList:
10218     return hasD16() ? "operand must be a list of registers in range [d0, d15]"
10219                     : "operand must be a list of registers in range [d0, d31]";
10220 
10221   // For all other diags, use the static string from tablegen.
10222   default:
10223     return getMatchKindDiag(MatchError);
10224   }
10225 }
10226 
10227 // Process the list of near-misses, throwing away ones we don't want to report
10228 // to the user, and converting the rest to a source location and string that
10229 // should be reported.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                               SmallVectorImpl<NearMissMessage> &NearMissesOut,
                               SMLoc IDLoc, OperandVector &Operands) {
  // TODO: If operand didn't match, sub in a dummy one and run target
  // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types dont have SuperClasses set, so we report
  // redundant ones.
  // TODO: Some operands are superclasses of registers (e.g.
  // MCK_RegShiftedImm), we don't have any way to represent that currently.
  // TODO: This is not all ARM-specific, can some of it be factored out?

  // Record some information about near-misses that we have already seen, so
  // that we can avoid reporting redundant ones. For example, if there are
  // variants of an instruction that take 8- and 16-bit immediates, we want
  // to only report the widest one.
  std::multimap<unsigned, unsigned> OperandMissesSeen;
  SmallSet<uint64_t, 4> FeatureMissesSeen;
  bool ReportedTooFewOperands = false;

  // Process the near-misses in reverse order, so that we see more general ones
  // first, and so can avoid emitting more specific ones.
  for (NearMissInfo &I : reverse(NearMissesIn)) {
    switch (I.getKind()) {
    case NearMissInfo::NearMissOperand: {
      SMLoc OperandLoc =
          ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
      const char *OperandDiag =
          getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());

      // If we have already emitted a message for a superclass, don't also report
      // the sub-class. We consider all operand classes that we don't have a
      // specialised diagnostic for to be equal for the propose of this check,
      // so that we don't report the generic error multiple times on the same
      // operand.
      unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
      auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
      if (std::any_of(PrevReports.first, PrevReports.second,
                      [DupCheckMatchClass](
                          const std::pair<unsigned, unsigned> Pair) {
            if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
              return Pair.second == DupCheckMatchClass;
            else
              return isSubclass((MatchClassKind)DupCheckMatchClass,
                                (MatchClassKind)Pair.second);
          }))
        break;
      OperandMissesSeen.insert(
          std::make_pair(I.getOperandIndex(), DupCheckMatchClass));

      NearMissMessage Message;
      Message.Loc = OperandLoc;
      if (OperandDiag) {
        Message.Message = OperandDiag;
      } else if (I.getOperandClass() == InvalidMatchClass) {
        Message.Message = "too many operands for instruction";
      } else {
        Message.Message = "invalid operand for instruction";
        DEBUG(dbgs() << "Missing diagnostic string for operand class " <<
              getMatchClassName((MatchClassKind)I.getOperandClass())
              << I.getOperandClass() << ", error " << I.getOperandError()
              << ", opcode " << MII.getName(I.getOpcode()) << "\n");
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissFeature: {
      uint64_t MissingFeatures = I.getFeatures();
      // Don't report the same set of features twice.
      if (FeatureMissesSeen.count(MissingFeatures))
        break;
      FeatureMissesSeen.insert(MissingFeatures);

      // Special case: don't report a feature set which includes arm-mode for
      // targets that don't have ARM mode.
      if ((MissingFeatures & Feature_IsARM) && !hasARM())
        break;
      // Don't report any near-misses that both require switching instruction
      // set, and adding other subtarget features.
      if (isThumb() && (MissingFeatures & Feature_IsARM) &&
          (MissingFeatures & ~Feature_IsARM))
        break;
      if (!isThumb() && (MissingFeatures & Feature_IsThumb) &&
          (MissingFeatures & ~Feature_IsThumb))
        break;
      if (!isThumb() && (MissingFeatures & Feature_IsThumb2) &&
          (MissingFeatures & ~(Feature_IsThumb2 | Feature_IsThumb)))
        break;
      if (isMClass() && (MissingFeatures & Feature_HasNEON))
        break;

      // Build a "instruction requires: feat1 feat2 ..." message listing every
      // missing feature bit by name.
      NearMissMessage Message;
      Message.Loc = IDLoc;
      raw_svector_ostream OS(Message.Message);

      OS << "instruction requires:";
      uint64_t Mask = 1;
      for (unsigned MaskPos = 0; MaskPos < (sizeof(MissingFeatures) * 8 - 1);
           ++MaskPos) {
        if (MissingFeatures & Mask) {
          OS << " " << getSubtargetFeatureName(MissingFeatures & Mask);
        }
        Mask <<= 1;
      }
      NearMissesOut.emplace_back(Message);

      break;
    }
    case NearMissInfo::NearMissPredicate: {
      // Map each target-predicate failure onto a human-readable message.
      NearMissMessage Message;
      Message.Loc = IDLoc;
      switch (I.getPredicateError()) {
      case Match_RequiresNotITBlock:
        Message.Message = "flag setting instruction only valid outside IT block";
        break;
      case Match_RequiresITBlock:
        Message.Message = "instruction only valid inside IT block";
        break;
      case Match_RequiresV6:
        Message.Message = "instruction variant requires ARMv6 or later";
        break;
      case Match_RequiresThumb2:
        Message.Message = "instruction variant requires Thumb2";
        break;
      case Match_RequiresV8:
        Message.Message = "instruction variant requires ARMv8 or later";
        break;
      case Match_RequiresFlagSetting:
        Message.Message = "no flag-preserving variant of this instruction available";
        break;
      case Match_InvalidOperand:
        Message.Message = "invalid operand for instruction";
        break;
      default:
        llvm_unreachable("Unhandled target predicate error");
        break;
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissTooFewOperands: {
      // Only report "too few operands" once, anchored after the last operand.
      if (!ReportedTooFewOperands) {
        SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
        NearMissesOut.emplace_back(NearMissMessage{
            EndLoc, StringRef("too few operands for instruction")});
        ReportedTooFewOperands = true;
      }
      break;
    }
    case NearMissInfo::NoNearMiss:
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}
10386 
10387 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
10388                                     SMLoc IDLoc, OperandVector &Operands) {
10389   SmallVector<NearMissMessage, 4> Messages;
10390   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
10391 
10392   if (Messages.size() == 0) {
10393     // No near-misses were found, so the best we can do is "invalid
10394     // instruction".
10395     Error(IDLoc, "invalid instruction");
10396   } else if (Messages.size() == 1) {
10397     // One near miss was found, report it as the sole error.
10398     Error(Messages[0].Loc, Messages[0].Message);
10399   } else {
10400     // More than one near miss, so report a generic "invalid instruction"
10401     // error, followed by notes for each of the near-misses.
10402     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
10403     for (auto &M : Messages) {
10404       Note(M.Loc, M.Message);
10405     }
10406   }
10407 }
10408 
10409 // FIXME: This structure should be moved inside ARMTargetParser
10410 // when we start to table-generate them, and we can use the ARM
10411 // flags below, that were generated by table-gen.
static const struct {
  const unsigned Kind;          // ARM::AEK_* extension identifier (may be OR'd).
  const uint64_t ArchCheck;     // Feature_* bits the base arch must provide.
  const FeatureBitset Features; // Subtarget features toggled by this extension;
                                // empty means recognised but unsupported.
} Extensions[] = {
  { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} },
  { ARM::AEK_CRYPTO,  Feature_HasV8,
    {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
  { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
  { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
    {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
  { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
  { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
  { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
  // FIXME: Only available in A-class, isel not predicated
  { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
  { ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
  { ARM::AEK_RAS, Feature_HasV8, {ARM::FeatureRAS} },
  // FIXME: Unsupported extensions.
  { ARM::AEK_OS, Feature_None, {} },
  { ARM::AEK_IWMMXT, Feature_None, {} },
  { ARM::AEK_IWMMXT2, Feature_None, {} },
  { ARM::AEK_MAVERICK, Feature_None, {} },
  { ARM::AEK_XSCALE, Feature_None, {} },
};
10437 
10438 /// parseDirectiveArchExtension
10439 ///   ::= .arch_extension [no]feature
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
  MCAsmParser &Parser = getParser();

  if (getLexer().isNot(AsmToken::Identifier))
    return Error(getLexer().getLoc(), "expected architecture extension name");

  StringRef Name = Parser.getTok().getString();
  SMLoc ExtLoc = Parser.getTok().getLoc();
  Lex();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.arch_extension' directive"))
    return true;

  // A leading "no" (case-insensitive) disables the extension rather than
  // enabling it, e.g. ".arch_extension nocrc".
  bool EnableFeature = true;
  if (Name.startswith_lower("no")) {
    EnableFeature = false;
    Name = Name.substr(2);
  }
  unsigned FeatureKind = ARM::parseArchExt(Name);
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(ExtLoc, "unknown architectural extension: " + Name);

  // Look the extension up in the Extensions table, validate it against the
  // current base architecture, and toggle the associated subtarget features.
  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    // An empty feature set marks extensions that are recognised but not
    // implemented (the FIXME entries in the table).
    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(ExtLoc, "architectural extension '" + Name +
                               "' is not "
                               "allowed for the current base architecture");

    // Compute only the bits that actually need flipping, so ToggleFeature
    // leaves each listed feature in the requested state.
    MCSubtargetInfo &STI = copySTI();
    FeatureBitset ToggleFeatures = EnableFeature
      ? (~STI.getFeatureBits() & Extension.Features)
      : ( STI.getFeatureBits() & Extension.Features);

    uint64_t Features =
        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
    setAvailableFeatures(Features);
    return false;
  }

  return Error(ExtLoc, "unknown architectural extension: " + Name);
}
10488 
10489 // Define this matcher function after the auto-generated include so we
10490 // have the match class enum definitions.
unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                  unsigned Kind) {
  ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  switch (Kind) {
  default: break;
  case MCK__35_0:
    // Alias syntax requires the literal "#0".
    if (Op.isImm())
      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
        if (CE->getValue() == 0)
          return Match_Success;
    break;
  case MCK_ModImm:
    if (Op.isImm()) {
      const MCExpr *SOExpr = Op.getImm();
      int64_t Value;
      // A not-yet-resolvable expression is accepted here; it is checked
      // again once its value is known.
      if (!SOExpr->evaluateAsAbsolute(Value))
        return Match_Success;
      assert((Value >= std::numeric_limits<int32_t>::min() &&
              Value <= std::numeric_limits<uint32_t>::max()) &&
             "expression value must be representable in 32 bits");
    }
    break;
  case MCK_rGPR:
    // rGPR additionally admits sp starting with ARMv8.
    if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
      return Match_Success;
    return Match_rGPR;
  case MCK_GPRPair:
    if (Op.isReg() &&
        MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
      return Match_Success;
    break;
  }
  return Match_InvalidOperand;
}
10528