1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMFeatures.h"
10 #include "Utils/ARMBaseInfo.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMInstPrinter.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "TargetInfo/ARMTargetInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSwitch.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCInstrInfo.h"
33 #include "llvm/MC/MCObjectFileInfo.h"
34 #include "llvm/MC/MCParser/MCAsmLexer.h"
35 #include "llvm/MC/MCParser/MCAsmParser.h"
36 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
37 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
38 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/MC/MCSection.h"
42 #include "llvm/MC/MCStreamer.h"
43 #include "llvm/MC/MCSubtargetInfo.h"
44 #include "llvm/MC/MCSymbol.h"
45 #include "llvm/MC/SubtargetFeature.h"
46 #include "llvm/Support/ARMBuildAttributes.h"
47 #include "llvm/Support/ARMEHABI.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/SMLoc.h"
54 #include "llvm/Support/TargetParser.h"
55 #include "llvm/Support/TargetRegistry.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstddef>
60 #include <cstdint>
61 #include <iterator>
62 #include <limits>
63 #include <memory>
64 #include <string>
65 #include <utility>
66 #include <vector>
67 
68 #define DEBUG_TYPE "asm-parser"
69 
70 using namespace llvm;
71 
72 namespace llvm {
73 extern const MCInstrDesc ARMInsts[];
74 } // end namespace llvm
75 
76 namespace {
77 
78 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
79 
80 static cl::opt<ImplicitItModeTy> ImplicitItMode(
81     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
82     cl::desc("Allow conditional instructions outside of an IT block"),
83     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
84                           "Accept in both ISAs, emit implicit ITs in Thumb"),
85                clEnumValN(ImplicitItModeTy::Never, "never",
86                           "Warn in ARM, reject in Thumb"),
87                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
88                           "Accept in ARM, reject in Thumb"),
89                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
90                           "Warn in ARM, emit implicit ITs in Thumb")));
91 
92 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
93                                         cl::init(false));
94 
95 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
96 
97 static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
98   // Position==0 means we're not in an IT block at all. Position==1
99   // means we want the first state bit, which is always 0 (Then).
100   // Position==2 means we want the second state bit, stored at bit 3
101   // of Mask, and so on downwards. So (5 - Position) will shift the
102   // right bit down to bit 0, including the always-0 bit at bit 4 for
103   // the mandatory initial Then.
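  // For example, with Mask == 0b0110 (the stored form of an ITTE block),
  // Position==2 yields 0 (Then) and Position==3 yields 1 (Else).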
104   return (Mask >> (5 - Position) & 1);
105 }
106 
107 class UnwindContext {
108   using Locs = SmallVector<SMLoc, 4>;
109 
110   MCAsmParser &Parser;
111   Locs FnStartLocs;
112   Locs CantUnwindLocs;
113   Locs PersonalityLocs;
114   Locs PersonalityIndexLocs;
115   Locs HandlerDataLocs;
116   int FPReg;
117 
118 public:
119   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
120 
121   bool hasFnStart() const { return !FnStartLocs.empty(); }
122   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
123   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
124 
125   bool hasPersonality() const {
126     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
127   }
128 
129   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
130   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
131   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
132   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
133   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
134 
135   void saveFPReg(int Reg) { FPReg = Reg; }
136   int getFPReg() const { return FPReg; }
137 
138   void emitFnStartLocNotes() const {
139     for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
140          FI != FE; ++FI)
141       Parser.Note(*FI, ".fnstart was specified here");
142   }
143 
144   void emitCantUnwindLocNotes() const {
145     for (Locs::const_iterator UI = CantUnwindLocs.begin(),
146                               UE = CantUnwindLocs.end(); UI != UE; ++UI)
147       Parser.Note(*UI, ".cantunwind was specified here");
148   }
149 
150   void emitHandlerDataLocNotes() const {
151     for (Locs::const_iterator HI = HandlerDataLocs.begin(),
152                               HE = HandlerDataLocs.end(); HI != HE; ++HI)
153       Parser.Note(*HI, ".handlerdata was specified here");
154   }
155 
156   void emitPersonalityLocNotes() const {
157     for (Locs::const_iterator PI = PersonalityLocs.begin(),
158                               PE = PersonalityLocs.end(),
159                               PII = PersonalityIndexLocs.begin(),
160                               PIE = PersonalityIndexLocs.end();
161          PI != PE || PII != PIE;) {
162       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
163         Parser.Note(*PI++, ".personality was specified here");
164       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
165         Parser.Note(*PII++, ".personalityindex was specified here");
166       else
167         llvm_unreachable(".personality and .personalityindex cannot be "
168                          "at the same location");
169     }
170   }
171 
172   void reset() {
173     FnStartLocs = Locs();
174     CantUnwindLocs = Locs();
175     PersonalityLocs = Locs();
176     HandlerDataLocs = Locs();
177     PersonalityIndexLocs = Locs();
178     FPReg = ARM::SP;
179   }
180 };
181 
182 
183 class ARMAsmParser : public MCTargetAsmParser {
184   const MCRegisterInfo *MRI;
185   UnwindContext UC;
186 
187   ARMTargetStreamer &getTargetStreamer() {
188     assert(getParser().getStreamer().getTargetStreamer() &&
189            "do not have a target streamer");
190     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
191     return static_cast<ARMTargetStreamer &>(TS);
192   }
193 
194   // Map of register aliases registered via the .req directive.
195   StringMap<unsigned> RegisterReqs;
196 
197   bool NextSymbolIsThumb;
198 
199   bool useImplicitITThumb() const {
200     return ImplicitItMode == ImplicitItModeTy::Always ||
201            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
202   }
203 
204   bool useImplicitITARM() const {
205     return ImplicitItMode == ImplicitItModeTy::Always ||
206            ImplicitItMode == ImplicitItModeTy::ARMOnly;
207   }
208 
209   struct {
210     ARMCC::CondCodes Cond;    // Condition for IT block.
211     unsigned Mask:4;          // Condition mask for instructions.
212                               // Starting at first 1 (from lsb).
213                               //   '0'  condition as indicated in IT (then).
214                               //   '1'  inverse of condition (else).
215                               // Count of instructions in IT block is
216                               // 4 - trailingzeroes(mask)
217                               // Note that this does not have the same encoding
218                               // as in the IT instruction, which also depends
219                               // on the low bit of the condition code.
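                              // For example, an ITTE block is stored here as
                              // Mask == 0b0110: bit 3 == 0 (second slot Then),
                              // bit 2 == 1 (third slot Else), and the trailing
                              // 1 at bit 1 gives 4 - 1 == 3 instructions.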
220 
221     unsigned CurPosition;     // Current position in parsing of IT
222                               // block. In range [0,4], with 0 being the IT
223                               // instruction itself. Initialized according to
224                               // count of instructions in block.  ~0U if no
225                               // active IT block.
226 
227     bool IsExplicit;          // true  - The IT instruction was present in the
228                               //         input, we should not modify it.
229                               // false - The IT instruction was added
230                               //         implicitly, we can extend it if that
231                               //         would be legal.
232   } ITState;
233 
234   SmallVector<MCInst, 4> PendingConditionalInsts;
235 
236   void flushPendingInstructions(MCStreamer &Out) override {
237     if (!inImplicitITBlock()) {
238       assert(PendingConditionalInsts.size() == 0);
239       return;
240     }
241 
242     // Emit the IT instruction
243     MCInst ITInst;
244     ITInst.setOpcode(ARM::t2IT);
245     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
246     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
247     Out.EmitInstruction(ITInst, getSTI());
248 
249     // Emit the conditional instructions
250     assert(PendingConditionalInsts.size() <= 4);
251     for (const MCInst &Inst : PendingConditionalInsts) {
252       Out.EmitInstruction(Inst, getSTI());
253     }
254     PendingConditionalInsts.clear();
255 
256     // Clear the IT state
257     ITState.Mask = 0;
258     ITState.CurPosition = ~0U;
259   }
260 
261   bool inITBlock() { return ITState.CurPosition != ~0U; }
262   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
263   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
264 
265   bool lastInITBlock() {
266     return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
267   }
268 
269   void forwardITPosition() {
270     if (!inITBlock()) return;
271     // Move to the next instruction in the IT block, if there is one. If not,
272     // mark the block as done, except for implicit IT blocks, which we leave
273     // open until we find an instruction that can't be added to it.
274     unsigned TZ = countTrailingZeros(ITState.Mask);
275     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
276       ITState.CurPosition = ~0U; // Done with the IT block after this.
277   }
278 
279   // Rewind the state of the current IT block, removing the last slot from it.
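  // For example, rewinding an ITTE block (Mask == 0b0110) leaves an ITT block
  // (Mask == 0b0100).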
280   void rewindImplicitITPosition() {
281     assert(inImplicitITBlock());
282     assert(ITState.CurPosition > 1);
283     ITState.CurPosition--;
284     unsigned TZ = countTrailingZeros(ITState.Mask);
285     unsigned NewMask = 0;
286     NewMask |= ITState.Mask & (0xC << TZ);
287     NewMask |= 0x2 << TZ;
288     ITState.Mask = NewMask;
289   }
290 
291   // Rewind the state of the current IT block, removing the last slot from it.
292   // If we were at the first slot, this closes the IT block.
293   void discardImplicitITBlock() {
294     assert(inImplicitITBlock());
295     assert(ITState.CurPosition == 1);
296     ITState.CurPosition = ~0U;
297   }
298 
299   // Return the low-subreg of a given Q register.
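  // e.g. Q1 maps to D2, since Qn overlaps D(2n) and D(2n+1).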
300   unsigned getDRegFromQReg(unsigned QReg) const {
301     return MRI->getSubReg(QReg, ARM::dsub_0);
302   }
303 
304   // Get the condition code corresponding to the current IT block slot.
305   ARMCC::CondCodes currentITCond() {
306     unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
307     return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
308   }
309 
310   // Invert the condition of the current IT block slot without changing any
311   // other slots in the same block.
312   void invertCurrentITCondition() {
313     if (ITState.CurPosition == 1) {
314       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
315     } else {
316       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
317     }
318   }
319 
320   // Returns true if the current IT block is full (all 4 slots used).
321   bool isITBlockFull() {
322     return inITBlock() && (ITState.Mask & 1);
323   }
324 
325   // Extend the current implicit IT block to have one more slot with the given
326   // condition code.
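  // For example, extending an ITT block (Mask == 0b0100) with the opposite
  // condition yields an ITTE block (Mask == 0b0110).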
327   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
328     assert(inImplicitITBlock());
329     assert(!isITBlockFull());
330     assert(Cond == ITState.Cond ||
331            Cond == ARMCC::getOppositeCondition(ITState.Cond));
332     unsigned TZ = countTrailingZeros(ITState.Mask);
333     unsigned NewMask = 0;
334     // Keep any existing condition bits.
335     NewMask |= ITState.Mask & (0xE << TZ);
336     // Insert the new condition bit.
337     NewMask |= (Cond != ITState.Cond) << TZ;
338     // Move the trailing 1 down one bit.
339     NewMask |= 1 << (TZ - 1);
340     ITState.Mask = NewMask;
341   }
342 
343   // Create a new implicit IT block with a dummy condition code.
344   void startImplicitITBlock() {
345     assert(!inITBlock());
346     ITState.Cond = ARMCC::AL;
347     ITState.Mask = 8;
348     ITState.CurPosition = 1;
349     ITState.IsExplicit = false;
350   }
351 
352   // Create a new explicit IT block with the given condition and mask.
353   // The mask should be in the format used in ARMOperand and
354   // MCOperand, with a 1 implying 'e', regardless of the low bit of
355   // the condition.
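  // For example, "itte eq" arrives here as Cond == ARMCC::EQ and
  // Mask == 0b0110.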
356   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
357     assert(!inITBlock());
358     ITState.Cond = Cond;
359     ITState.Mask = Mask;
360     ITState.CurPosition = 0;
361     ITState.IsExplicit = true;
362   }
363 
364   struct {
365     unsigned Mask : 4;
366     unsigned CurPosition;
367   } VPTState;
368   bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
369   void forwardVPTPosition() {
370     if (!inVPTBlock()) return;
371     unsigned TZ = countTrailingZeros(VPTState.Mask);
372     if (++VPTState.CurPosition == 5 - TZ)
373       VPTState.CurPosition = ~0U;
374   }
375 
376   void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
377     return getParser().Note(L, Msg, Range);
378   }
379 
380   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
381     return getParser().Warning(L, Msg, Range);
382   }
383 
384   bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
385     return getParser().Error(L, Msg, Range);
386   }
387 
388   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
389                            unsigned ListNo, bool IsARPop = false);
390   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
391                            unsigned ListNo);
392 
393   int tryParseRegister();
394   bool tryParseRegisterWithWriteBack(OperandVector &);
395   int tryParseShiftRegister(OperandVector &);
396   bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
397   bool parseMemory(OperandVector &);
398   bool parseOperand(OperandVector &, StringRef Mnemonic);
399   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
400   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
401                               unsigned &ShiftAmount);
402   bool parseLiteralValues(unsigned Size, SMLoc L);
403   bool parseDirectiveThumb(SMLoc L);
404   bool parseDirectiveARM(SMLoc L);
405   bool parseDirectiveThumbFunc(SMLoc L);
406   bool parseDirectiveCode(SMLoc L);
407   bool parseDirectiveSyntax(SMLoc L);
408   bool parseDirectiveReq(StringRef Name, SMLoc L);
409   bool parseDirectiveUnreq(SMLoc L);
410   bool parseDirectiveArch(SMLoc L);
411   bool parseDirectiveEabiAttr(SMLoc L);
412   bool parseDirectiveCPU(SMLoc L);
413   bool parseDirectiveFPU(SMLoc L);
414   bool parseDirectiveFnStart(SMLoc L);
415   bool parseDirectiveFnEnd(SMLoc L);
416   bool parseDirectiveCantUnwind(SMLoc L);
417   bool parseDirectivePersonality(SMLoc L);
418   bool parseDirectiveHandlerData(SMLoc L);
419   bool parseDirectiveSetFP(SMLoc L);
420   bool parseDirectivePad(SMLoc L);
421   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
422   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
423   bool parseDirectiveLtorg(SMLoc L);
424   bool parseDirectiveEven(SMLoc L);
425   bool parseDirectivePersonalityIndex(SMLoc L);
426   bool parseDirectiveUnwindRaw(SMLoc L);
427   bool parseDirectiveTLSDescSeq(SMLoc L);
428   bool parseDirectiveMovSP(SMLoc L);
429   bool parseDirectiveObjectArch(SMLoc L);
430   bool parseDirectiveArchExtension(SMLoc L);
431   bool parseDirectiveAlign(SMLoc L);
432   bool parseDirectiveThumbSet(SMLoc L);
433 
434   bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
435   StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
436                           unsigned &PredicationCode,
437                           unsigned &VPTPredicationCode, bool &CarrySetting,
438                           unsigned &ProcessorIMod, StringRef &ITMask);
439   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
440                              StringRef FullInst, bool &CanAcceptCarrySet,
441                              bool &CanAcceptPredicationCode,
442                              bool &CanAcceptVPTPredicationCode);
443 
444   void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
445                                      OperandVector &Operands);
446   bool isThumb() const {
447     // FIXME: Can tablegen auto-generate this?
448     return getSTI().getFeatureBits()[ARM::ModeThumb];
449   }
450 
451   bool isThumbOne() const {
452     return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
453   }
454 
455   bool isThumbTwo() const {
456     return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
457   }
458 
459   bool hasThumb() const {
460     return getSTI().getFeatureBits()[ARM::HasV4TOps];
461   }
462 
463   bool hasThumb2() const {
464     return getSTI().getFeatureBits()[ARM::FeatureThumb2];
465   }
466 
467   bool hasV6Ops() const {
468     return getSTI().getFeatureBits()[ARM::HasV6Ops];
469   }
470 
471   bool hasV6T2Ops() const {
472     return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
473   }
474 
475   bool hasV6MOps() const {
476     return getSTI().getFeatureBits()[ARM::HasV6MOps];
477   }
478 
479   bool hasV7Ops() const {
480     return getSTI().getFeatureBits()[ARM::HasV7Ops];
481   }
482 
483   bool hasV8Ops() const {
484     return getSTI().getFeatureBits()[ARM::HasV8Ops];
485   }
486 
487   bool hasV8MBaseline() const {
488     return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
489   }
490 
491   bool hasV8MMainline() const {
492     return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
493   }
494   bool hasV8_1MMainline() const {
495     return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
496   }
497   bool hasMVE() const {
498     return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
499   }
500   bool hasMVEFloat() const {
501     return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
502   }
503   bool has8MSecExt() const {
504     return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
505   }
506 
507   bool hasARM() const {
508     return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
509   }
510 
511   bool hasDSP() const {
512     return getSTI().getFeatureBits()[ARM::FeatureDSP];
513   }
514 
515   bool hasD32() const {
516     return getSTI().getFeatureBits()[ARM::FeatureD32];
517   }
518 
519   bool hasV8_1aOps() const {
520     return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
521   }
522 
523   bool hasRAS() const {
524     return getSTI().getFeatureBits()[ARM::FeatureRAS];
525   }
526 
527   void SwitchMode() {
528     MCSubtargetInfo &STI = copySTI();
529     auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
530     setAvailableFeatures(FB);
531   }
532 
533   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
534 
535   bool isMClass() const {
536     return getSTI().getFeatureBits()[ARM::FeatureMClass];
537   }
538 
539   /// @name Auto-generated Match Functions
540   /// {
541 
542 #define GET_ASSEMBLER_HEADER
543 #include "ARMGenAsmMatcher.inc"
544 
545   /// }
546 
547   OperandMatchResultTy parseITCondCode(OperandVector &);
548   OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
549   OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
550   OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
551   OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
552   OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
553   OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
554   OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
555   OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
556   OperandMatchResultTy parseBankedRegOperand(OperandVector &);
557   OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
558                                    int High);
559   OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
560     return parsePKHImm(O, "lsl", 0, 31);
561   }
562   OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
563     return parsePKHImm(O, "asr", 1, 32);
564   }
565   OperandMatchResultTy parseSetEndImm(OperandVector &);
566   OperandMatchResultTy parseShifterImm(OperandVector &);
567   OperandMatchResultTy parseRotImm(OperandVector &);
568   OperandMatchResultTy parseModImm(OperandVector &);
569   OperandMatchResultTy parseBitfield(OperandVector &);
570   OperandMatchResultTy parsePostIdxReg(OperandVector &);
571   OperandMatchResultTy parseAM3Offset(OperandVector &);
572   OperandMatchResultTy parseFPImm(OperandVector &);
573   OperandMatchResultTy parseVectorList(OperandVector &);
574   OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
575                                        SMLoc &EndLoc);
576 
577   // Asm Match Converter Methods
578   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
579   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
580 
581   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
582   bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
583   bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
584   bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
585   bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
586   bool isITBlockTerminator(MCInst &Inst) const;
587   void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
588   bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
589                         bool Load, bool ARMMode, bool Writeback);
590 
591 public:
592   enum ARMMatchResultTy {
593     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
594     Match_RequiresNotITBlock,
595     Match_RequiresV6,
596     Match_RequiresThumb2,
597     Match_RequiresV8,
598     Match_RequiresFlagSetting,
599 #define GET_OPERAND_DIAGNOSTIC_TYPES
600 #include "ARMGenAsmMatcher.inc"
601 
602   };
603 
604   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
605                const MCInstrInfo &MII, const MCTargetOptions &Options)
606     : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
607     MCAsmParserExtension::Initialize(Parser);
608 
609     // Cache the MCRegisterInfo.
610     MRI = getContext().getRegisterInfo();
611 
612     // Initialize the set of available features.
613     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
614 
615     // Add build attributes based on the selected target.
616     if (AddBuildAttributes)
617       getTargetStreamer().emitTargetAttributes(STI);
618 
619     // Not in an ITBlock to start with.
620     ITState.CurPosition = ~0U;
621 
622     VPTState.CurPosition = ~0U;
623 
624     NextSymbolIsThumb = false;
625   }
626 
627   // Implementation of the MCTargetAsmParser interface:
628   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
629   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
630                         SMLoc NameLoc, OperandVector &Operands) override;
631   bool ParseDirective(AsmToken DirectiveID) override;
632 
633   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
634                                       unsigned Kind) override;
635   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
636 
637   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
638                                OperandVector &Operands, MCStreamer &Out,
639                                uint64_t &ErrorInfo,
640                                bool MatchingInlineAsm) override;
641   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
642                             SmallVectorImpl<NearMissInfo> &NearMisses,
643                             bool MatchingInlineAsm, bool &EmitInITBlock,
644                             MCStreamer &Out);
645 
646   struct NearMissMessage {
647     SMLoc Loc;
648     SmallString<128> Message;
649   };
650 
651   const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
652 
653   void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
654                         SmallVectorImpl<NearMissMessage> &NearMissesOut,
655                         SMLoc IDLoc, OperandVector &Operands);
656   void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
657                         OperandVector &Operands);
658 
659   void doBeforeLabelEmit(MCSymbol *Symbol) override;
660 
661   void onLabelParsed(MCSymbol *Symbol) override;
662 };
663 
664 /// ARMOperand - Instances of this class represent a parsed ARM machine
665 /// operand.
666 class ARMOperand : public MCParsedAsmOperand {
667   enum KindTy {
668     k_CondCode,
669     k_VPTPred,
670     k_CCOut,
671     k_ITCondMask,
672     k_CoprocNum,
673     k_CoprocReg,
674     k_CoprocOption,
675     k_Immediate,
676     k_MemBarrierOpt,
677     k_InstSyncBarrierOpt,
678     k_TraceSyncBarrierOpt,
679     k_Memory,
680     k_PostIndexRegister,
681     k_MSRMask,
682     k_BankedReg,
683     k_ProcIFlags,
684     k_VectorIndex,
685     k_Register,
686     k_RegisterList,
687     k_RegisterListWithAPSR,
688     k_DPRRegisterList,
689     k_SPRRegisterList,
690     k_FPSRegisterListWithVPR,
691     k_FPDRegisterListWithVPR,
692     k_VectorList,
693     k_VectorListAllLanes,
694     k_VectorListIndexed,
695     k_ShiftedRegister,
696     k_ShiftedImmediate,
697     k_ShifterImmediate,
698     k_RotateImmediate,
699     k_ModifiedImmediate,
700     k_ConstantPoolImmediate,
701     k_BitfieldDescriptor,
702     k_Token,
703   } Kind;
704 
705   SMLoc StartLoc, EndLoc, AlignmentLoc;
706   SmallVector<unsigned, 8> Registers;
707 
708   struct CCOp {
709     ARMCC::CondCodes Val;
710   };
711 
712   struct VCCOp {
713     ARMVCC::VPTCodes Val;
714   };
715 
716   struct CopOp {
717     unsigned Val;
718   };
719 
720   struct CoprocOptionOp {
721     unsigned Val;
722   };
723 
724   struct ITMaskOp {
725     unsigned Mask:4;
726   };
727 
728   struct MBOptOp {
729     ARM_MB::MemBOpt Val;
730   };
731 
732   struct ISBOptOp {
733     ARM_ISB::InstSyncBOpt Val;
734   };
735 
736   struct TSBOptOp {
737     ARM_TSB::TraceSyncBOpt Val;
738   };
739 
740   struct IFlagsOp {
741     ARM_PROC::IFlags Val;
742   };
743 
744   struct MMaskOp {
745     unsigned Val;
746   };
747 
748   struct BankedRegOp {
749     unsigned Val;
750   };
751 
752   struct TokOp {
753     const char *Data;
754     unsigned Length;
755   };
756 
757   struct RegOp {
758     unsigned RegNum;
759   };
760 
761   // A vector register list is a sequential list of 1 to 4 registers.
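  // e.g. "{d0, d1, d2}" is stored with RegNum == D0 and Count == 3; LaneIndex
  // and isDoubleSpaced describe indexed and register-stride-two forms such as
  // "{d0[1], d2[1]}".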
762   struct VectorListOp {
763     unsigned RegNum;
764     unsigned Count;
765     unsigned LaneIndex;
766     bool isDoubleSpaced;
767   };
768 
769   struct VectorIndexOp {
770     unsigned Val;
771   };
772 
773   struct ImmOp {
774     const MCExpr *Val;
775   };
776 
777   /// Combined record for all forms of ARM address expressions.
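  /// For example, "[r0, r1, lsl #2]" is stored with BaseRegNum == R0,
  /// OffsetRegNum == R1, ShiftType == lsl and ShiftImm == 2, while "[r2, #4]"
  /// leaves OffsetRegNum as 0 and stores the constant 4 in OffsetImm.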
778   struct MemoryOp {
779     unsigned BaseRegNum;
780     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
781     // was specified.
782     const MCConstantExpr *OffsetImm;  // Offset immediate value
783     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
784     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
785     unsigned ShiftImm;        // shift for OffsetReg.
786     unsigned Alignment;       // 0 = no alignment specified
787     // n = alignment in bytes (2, 4, 8, 16, or 32)
788     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
789   };
790 
791   struct PostIdxRegOp {
792     unsigned RegNum;
793     bool isAdd;
794     ARM_AM::ShiftOpc ShiftTy;
795     unsigned ShiftImm;
796   };
797 
798   struct ShifterImmOp {
799     bool isASR;
800     unsigned Imm;
801   };
802 
803   struct RegShiftedRegOp {
804     ARM_AM::ShiftOpc ShiftTy;
805     unsigned SrcReg;
806     unsigned ShiftReg;
807     unsigned ShiftImm;
808   };
809 
810   struct RegShiftedImmOp {
811     ARM_AM::ShiftOpc ShiftTy;
812     unsigned SrcReg;
813     unsigned ShiftImm;
814   };
815 
816   struct RotImmOp {
817     unsigned Imm;
818   };
819 
820   struct ModImmOp {
821     unsigned Bits;
822     unsigned Rot;
823   };
824 
825   struct BitfieldOp {
826     unsigned LSB;
827     unsigned Width;
828   };
829 
830   union {
831     struct CCOp CC;
832     struct VCCOp VCC;
833     struct CopOp Cop;
834     struct CoprocOptionOp CoprocOption;
835     struct MBOptOp MBOpt;
836     struct ISBOptOp ISBOpt;
837     struct TSBOptOp TSBOpt;
838     struct ITMaskOp ITMask;
839     struct IFlagsOp IFlags;
840     struct MMaskOp MMask;
841     struct BankedRegOp BankedReg;
842     struct TokOp Tok;
843     struct RegOp Reg;
844     struct VectorListOp VectorList;
845     struct VectorIndexOp VectorIndex;
846     struct ImmOp Imm;
847     struct MemoryOp Memory;
848     struct PostIdxRegOp PostIdxReg;
849     struct ShifterImmOp ShifterImm;
850     struct RegShiftedRegOp RegShiftedReg;
851     struct RegShiftedImmOp RegShiftedImm;
852     struct RotImmOp RotImm;
853     struct ModImmOp ModImm;
854     struct BitfieldOp Bitfield;
855   };
856 
857 public:
858   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
859 
860   /// getStartLoc - Get the location of the first token of this operand.
861   SMLoc getStartLoc() const override { return StartLoc; }
862 
863   /// getEndLoc - Get the location of the last token of this operand.
864   SMLoc getEndLoc() const override { return EndLoc; }
865 
866   /// getLocRange - Get the range between the first and last token of this
867   /// operand.
868   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
869 
870   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
871   SMLoc getAlignmentLoc() const {
872     assert(Kind == k_Memory && "Invalid access!");
873     return AlignmentLoc;
874   }
875 
876   ARMCC::CondCodes getCondCode() const {
877     assert(Kind == k_CondCode && "Invalid access!");
878     return CC.Val;
879   }
880 
881   ARMVCC::VPTCodes getVPTPred() const {
882     assert(isVPTPred() && "Invalid access!");
883     return VCC.Val;
884   }
885 
886   unsigned getCoproc() const {
887     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
888     return Cop.Val;
889   }
890 
891   StringRef getToken() const {
892     assert(Kind == k_Token && "Invalid access!");
893     return StringRef(Tok.Data, Tok.Length);
894   }
895 
896   unsigned getReg() const override {
897     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
898     return Reg.RegNum;
899   }
900 
901   const SmallVectorImpl<unsigned> &getRegList() const {
902     assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
903             Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
904             Kind == k_FPSRegisterListWithVPR ||
905             Kind == k_FPDRegisterListWithVPR) &&
906            "Invalid access!");
907     return Registers;
908   }
909 
910   const MCExpr *getImm() const {
911     assert(isImm() && "Invalid access!");
912     return Imm.Val;
913   }
914 
915   const MCExpr *getConstantPoolImm() const {
916     assert(isConstantPoolImm() && "Invalid access!");
917     return Imm.Val;
918   }
919 
920   unsigned getVectorIndex() const {
921     assert(Kind == k_VectorIndex && "Invalid access!");
922     return VectorIndex.Val;
923   }
924 
925   ARM_MB::MemBOpt getMemBarrierOpt() const {
926     assert(Kind == k_MemBarrierOpt && "Invalid access!");
927     return MBOpt.Val;
928   }
929 
930   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
931     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
932     return ISBOpt.Val;
933   }
934 
935   ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
936     assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
937     return TSBOpt.Val;
938   }
939 
940   ARM_PROC::IFlags getProcIFlags() const {
941     assert(Kind == k_ProcIFlags && "Invalid access!");
942     return IFlags.Val;
943   }
944 
945   unsigned getMSRMask() const {
946     assert(Kind == k_MSRMask && "Invalid access!");
947     return MMask.Val;
948   }
949 
950   unsigned getBankedReg() const {
951     assert(Kind == k_BankedReg && "Invalid access!");
952     return BankedReg.Val;
953   }
954 
955   bool isCoprocNum() const { return Kind == k_CoprocNum; }
956   bool isCoprocReg() const { return Kind == k_CoprocReg; }
957   bool isCoprocOption() const { return Kind == k_CoprocOption; }
958   bool isCondCode() const { return Kind == k_CondCode; }
959   bool isVPTPred() const { return Kind == k_VPTPred; }
960   bool isCCOut() const { return Kind == k_CCOut; }
961   bool isITMask() const { return Kind == k_ITCondMask; }
962   bool isITCondCode() const { return Kind == k_CondCode; }
963   bool isImm() const override {
964     return Kind == k_Immediate;
965   }
966 
967   bool isARMBranchTarget() const {
968     if (!isImm()) return false;
969 
970     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
971       return CE->getValue() % 4 == 0;
972     return true;
973   }
974 
975 
976   bool isThumbBranchTarget() const {
977     if (!isImm()) return false;
978 
979     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
980       return CE->getValue() % 2 == 0;
981     return true;
982   }
983 
984   // Checks whether this operand is an unsigned offset which fits in a field
985   // of the specified width and is scaled by a specific number of bits.
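  // e.g. isUnsignedOffset<8, 2>() accepts multiples of 4 in [0, 1020].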
986   template<unsigned width, unsigned scale>
987   bool isUnsignedOffset() const {
988     if (!isImm()) return false;
989     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
990     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
991       int64_t Val = CE->getValue();
992       int64_t Align = 1LL << scale;
993       int64_t Max = Align * ((1LL << width) - 1);
994       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
995     }
996     return false;
997   }
998 
999   // Checks whether this operand is a signed offset which fits in a field
1000   // of the specified width and is scaled by a specific number of bits.
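  // e.g. isSignedOffset<8, 2>() accepts multiples of 4 in [-512, 508].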
1001   template<unsigned width, unsigned scale>
1002   bool isSignedOffset() const {
1003     if (!isImm()) return false;
1004     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1005     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1006       int64_t Val = CE->getValue();
1007       int64_t Align = 1LL << scale;
1008       int64_t Max = Align * ((1LL << (width-1)) - 1);
1009       int64_t Min = -Align * (1LL << (width-1));
1010       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1011     }
1012     return false;
1013   }
1014 
1015   // Checks whether this operand is a memory operand computed as an offset
1016   // applied to PC. The offset may have 8 bits of magnitude and is represented
1017   // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
1018   // relocatable expression.
1019   bool isThumbMemPC() const {
1020     int64_t Val = 0;
1021     if (isImm()) {
1022       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1023       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1024       if (!CE) return false;
1025       Val = CE->getValue();
1026     }
1027     else if (isMem()) {
1028       if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
1029       if (Memory.BaseRegNum != ARM::PC) return false;
1030       Val = Memory.OffsetImm->getValue();
1031     }
1032     else return false;
1033     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1034   }
1035 
1036   bool isFPImm() const {
1037     if (!isImm()) return false;
1038     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1039     if (!CE) return false;
1040     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1041     return Val != -1;
1042   }
1043 
1044   template<int64_t N, int64_t M>
1045   bool isImmediate() const {
1046     if (!isImm()) return false;
1047     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1048     if (!CE) return false;
1049     int64_t Value = CE->getValue();
1050     return Value >= N && Value <= M;
1051   }
1052 
1053   template<int64_t N, int64_t M>
1054   bool isImmediateS4() const {
1055     if (!isImm()) return false;
1056     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1057     if (!CE) return false;
1058     int64_t Value = CE->getValue();
1059     return ((Value & 3) == 0) && Value >= N && Value <= M;
1060   }
1061 
1062   bool isFBits16() const {
1063     return isImmediate<0, 17>();
1064   }
1065   bool isFBits32() const {
1066     return isImmediate<1, 33>();
1067   }
1068   bool isImm8s4() const {
1069     return isImmediateS4<-1020, 1020>();
1070   }
1071   bool isImm7s4() const {
1072     return isImmediateS4<-508, 508>();
1073   }
1074   bool isImm0_1020s4() const {
1075     return isImmediateS4<0, 1020>();
1076   }
1077   bool isImm0_508s4() const {
1078     return isImmediateS4<0, 508>();
1079   }
1080   bool isImm0_508s4Neg() const {
1081     if (!isImm()) return false;
1082     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1083     if (!CE) return false;
1084     int64_t Value = -CE->getValue();
1085     // Explicitly exclude zero. We want that to use the normal 0_508 version.
1086     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1087   }
1088 
1089   bool isImm0_4095Neg() const {
1090     if (!isImm()) return false;
1091     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1092     if (!CE) return false;
1093     // isImm0_4095Neg is used with 32-bit immediates only.
1094     // 32-bit immediates are zero extended to 64-bit when parsed,
1095     // thus simple -CE->getValue() results in a big negative number,
1096     // not a small positive number as intended
1097     if ((CE->getValue() >> 32) > 0) return false;
1098     uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1099     return Value > 0 && Value < 4096;
1100   }
1101 
1102   bool isImm0_7() const {
1103     return isImmediate<0, 7>();
1104   }
1105 
1106   bool isImm1_16() const {
1107     return isImmediate<1, 16>();
1108   }
1109 
1110   bool isImm1_32() const {
1111     return isImmediate<1, 32>();
1112   }
1113 
1114   bool isImm8_255() const {
1115     return isImmediate<8, 255>();
1116   }
1117 
1118   bool isImm256_65535Expr() const {
1119     if (!isImm()) return false;
1120     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1121     // If it's not a constant expression, it'll generate a fixup and be
1122     // handled later.
1123     if (!CE) return true;
1124     int64_t Value = CE->getValue();
1125     return Value >= 256 && Value < 65536;
1126   }
1127 
1128   bool isImm0_65535Expr() const {
1129     if (!isImm()) return false;
1130     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1131     // If it's not a constant expression, it'll generate a fixup and be
1132     // handled later.
1133     if (!CE) return true;
1134     int64_t Value = CE->getValue();
1135     return Value >= 0 && Value < 65536;
1136   }
1137 
1138   bool isImm24bit() const {
1139     return isImmediate<0, 0xffffff + 1>();
1140   }
1141 
1142   bool isImmThumbSR() const {
1143     return isImmediate<1, 33>();
1144   }
1145 
1146   bool isPKHLSLImm() const {
1147     return isImmediate<0, 32>();
1148   }
1149 
1150   bool isPKHASRImm() const {
1151     return isImmediate<0, 33>();
1152   }
1153 
1154   bool isAdrLabel() const {
1155     // If we have an immediate that's not a constant, treat it as a label
1156     // reference needing a fixup.
1157     if (isImm() && !isa<MCConstantExpr>(getImm()))
1158       return true;
1159 
1160     // If it is a constant, it must fit into a modified immediate encoding.
1161     if (!isImm()) return false;
1162     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163     if (!CE) return false;
1164     int64_t Value = CE->getValue();
1165     return (ARM_AM::getSOImmVal(Value) != -1 ||
1166             ARM_AM::getSOImmVal(-Value) != -1);
1167   }
1168 
1169   bool isT2SOImm() const {
1170     // If we have an immediate that's not a constant, treat it as an expression
1171     // needing a fixup.
1172     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1173       // We want to avoid matching :upper16: and :lower16: as we want these
1174       // expressions to match in isImm0_65535Expr()
1175       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1176       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1177                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1178     }
1179     if (!isImm()) return false;
1180     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1181     if (!CE) return false;
1182     int64_t Value = CE->getValue();
1183     return ARM_AM::getT2SOImmVal(Value) != -1;
1184   }
1185 
1186   bool isT2SOImmNot() const {
1187     if (!isImm()) return false;
1188     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1189     if (!CE) return false;
1190     int64_t Value = CE->getValue();
1191     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1192       ARM_AM::getT2SOImmVal(~Value) != -1;
1193   }
1194 
1195   bool isT2SOImmNeg() const {
1196     if (!isImm()) return false;
1197     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1198     if (!CE) return false;
1199     int64_t Value = CE->getValue();
1200     // Only use this when not representable as a plain so_imm.
1201     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1202       ARM_AM::getT2SOImmVal(-Value) != -1;
1203   }
1204 
1205   bool isSetEndImm() const {
1206     if (!isImm()) return false;
1207     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1208     if (!CE) return false;
1209     int64_t Value = CE->getValue();
1210     return Value == 1 || Value == 0;
1211   }
1212 
1213   bool isReg() const override { return Kind == k_Register; }
1214   bool isRegList() const { return Kind == k_RegisterList; }
1215   bool isRegListWithAPSR() const {
1216     return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1217   }
1218   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1219   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1220   bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1221   bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1222   bool isToken() const override { return Kind == k_Token; }
1223   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1224   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1225   bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1226   bool isMem() const override {
1227     if (Kind != k_Memory)
1228       return false;
1229     if (Memory.BaseRegNum &&
1230         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1231       return false;
1232     if (Memory.OffsetRegNum &&
1233         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1234       return false;
1235     return true;
1236   }
1237   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1238   bool isRegShiftedReg() const {
1239     return Kind == k_ShiftedRegister &&
1240            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1241                RegShiftedReg.SrcReg) &&
1242            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1243                RegShiftedReg.ShiftReg);
1244   }
1245   bool isRegShiftedImm() const {
1246     return Kind == k_ShiftedImmediate &&
1247            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1248                RegShiftedImm.SrcReg);
1249   }
1250   bool isRotImm() const { return Kind == k_RotateImmediate; }
1251   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1252 
1253   bool isModImmNot() const {
1254     if (!isImm()) return false;
1255     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1256     if (!CE) return false;
1257     int64_t Value = CE->getValue();
1258     return ARM_AM::getSOImmVal(~Value) != -1;
1259   }
1260 
1261   bool isModImmNeg() const {
1262     if (!isImm()) return false;
1263     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1264     if (!CE) return false;
1265     int64_t Value = CE->getValue();
1266     return ARM_AM::getSOImmVal(Value) == -1 &&
1267       ARM_AM::getSOImmVal(-Value) != -1;
1268   }
1269 
1270   bool isThumbModImmNeg1_7() const {
1271     if (!isImm()) return false;
1272     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1273     if (!CE) return false;
1274     int32_t Value = -(int32_t)CE->getValue();
1275     return 0 < Value && Value < 8;
1276   }
1277 
1278   bool isThumbModImmNeg8_255() const {
1279     if (!isImm()) return false;
1280     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1281     if (!CE) return false;
1282     int32_t Value = -(int32_t)CE->getValue();
1283     return 7 < Value && Value < 256;
1284   }
1285 
1286   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1287   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1288   bool isPostIdxRegShifted() const {
1289     return Kind == k_PostIndexRegister &&
1290            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1291   }
1292   bool isPostIdxReg() const {
1293     return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1294   }
1295   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1296     if (!isMem())
1297       return false;
1298     // No offset of any kind.
1299     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1300      (alignOK || Memory.Alignment == Alignment);
1301   }
1302   bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1303     if (!isMem())
1304       return false;
1305 
1306     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1307             Memory.BaseRegNum))
1308       return false;
1309 
1310     // No offset of any kind.
1311     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1312      (alignOK || Memory.Alignment == Alignment);
1313   }
1314   bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1315     if (!isMem())
1316       return false;
1317 
1318     if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1319             Memory.BaseRegNum))
1320       return false;
1321 
1322     // No offset of any kind.
1323     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1324      (alignOK || Memory.Alignment == Alignment);
1325   }
1326   bool isMemPCRelImm12() const {
1327     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1328       return false;
1329     // Base register must be PC.
1330     if (Memory.BaseRegNum != ARM::PC)
1331       return false;
1332     // Immediate offset in range [-4095, 4095].
1333     if (!Memory.OffsetImm) return true;
1334     int64_t Val = Memory.OffsetImm->getValue();
1335     return (Val > -4096 && Val < 4096) ||
1336            (Val == std::numeric_limits<int32_t>::min());
1337   }
1338 
1339   bool isAlignedMemory() const {
1340     return isMemNoOffset(true);
1341   }
1342 
1343   bool isAlignedMemoryNone() const {
1344     return isMemNoOffset(false, 0);
1345   }
1346 
1347   bool isDupAlignedMemoryNone() const {
1348     return isMemNoOffset(false, 0);
1349   }
1350 
1351   bool isAlignedMemory16() const {
1352     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1353       return true;
1354     return isMemNoOffset(false, 0);
1355   }
1356 
1357   bool isDupAlignedMemory16() const {
1358     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1359       return true;
1360     return isMemNoOffset(false, 0);
1361   }
1362 
1363   bool isAlignedMemory32() const {
1364     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1365       return true;
1366     return isMemNoOffset(false, 0);
1367   }
1368 
1369   bool isDupAlignedMemory32() const {
1370     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1371       return true;
1372     return isMemNoOffset(false, 0);
1373   }
1374 
1375   bool isAlignedMemory64() const {
1376     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1377       return true;
1378     return isMemNoOffset(false, 0);
1379   }
1380 
1381   bool isDupAlignedMemory64() const {
1382     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1383       return true;
1384     return isMemNoOffset(false, 0);
1385   }
1386 
1387   bool isAlignedMemory64or128() const {
1388     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1389       return true;
1390     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1391       return true;
1392     return isMemNoOffset(false, 0);
1393   }
1394 
1395   bool isDupAlignedMemory64or128() const {
1396     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1397       return true;
1398     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1399       return true;
1400     return isMemNoOffset(false, 0);
1401   }
1402 
1403   bool isAlignedMemory64or128or256() const {
1404     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1405       return true;
1406     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1407       return true;
1408     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1409       return true;
1410     return isMemNoOffset(false, 0);
1411   }
1412 
1413   bool isAddrMode2() const {
1414     if (!isMem() || Memory.Alignment != 0) return false;
1415     // Check for register offset.
1416     if (Memory.OffsetRegNum) return true;
1417     // Immediate offset in range [-4095, 4095].
1418     if (!Memory.OffsetImm) return true;
1419     int64_t Val = Memory.OffsetImm->getValue();
1420     return Val > -4096 && Val < 4096;
1421   }
1422 
1423   bool isAM2OffsetImm() const {
1424     if (!isImm()) return false;
1425     // Immediate offset in range [-4095, 4095].
1426     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1427     if (!CE) return false;
1428     int64_t Val = CE->getValue();
1429     return (Val == std::numeric_limits<int32_t>::min()) ||
1430            (Val > -4096 && Val < 4096);
1431   }
1432 
1433   bool isAddrMode3() const {
1434     // If we have an immediate that's not a constant, treat it as a label
1435     // reference needing a fixup. If it is a constant, it's something else
1436     // and we reject it.
1437     if (isImm() && !isa<MCConstantExpr>(getImm()))
1438       return true;
1439     if (!isMem() || Memory.Alignment != 0) return false;
1440     // No shifts are legal for AM3.
1441     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1442     // Check for register offset.
1443     if (Memory.OffsetRegNum) return true;
1444     // Immediate offset in range [-255, 255].
1445     if (!Memory.OffsetImm) return true;
1446     int64_t Val = Memory.OffsetImm->getValue();
1447     // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
1448     // have to check for this too.
1449     return (Val > -256 && Val < 256) ||
1450            Val == std::numeric_limits<int32_t>::min();
1451   }
1452 
1453   bool isAM3Offset() const {
1454     if (isPostIdxReg())
1455       return true;
1456     if (!isImm())
1457       return false;
1458     // Immediate offset in range [-255, 255].
1459     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1460     if (!CE) return false;
1461     int64_t Val = CE->getValue();
1462     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1463     return (Val > -256 && Val < 256) ||
1464            Val == std::numeric_limits<int32_t>::min();
1465   }
1466 
1467   bool isAddrMode5() const {
1468     // If we have an immediate that's not a constant, treat it as a label
1469     // reference needing a fixup. If it is a constant, it's something else
1470     // and we reject it.
1471     if (isImm() && !isa<MCConstantExpr>(getImm()))
1472       return true;
1473     if (!isMem() || Memory.Alignment != 0) return false;
1474     // Check for register offset.
1475     if (Memory.OffsetRegNum) return false;
1476     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1477     if (!Memory.OffsetImm) return true;
1478     int64_t Val = Memory.OffsetImm->getValue();
1479     return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1480       Val == std::numeric_limits<int32_t>::min();
1481   }
1482 
1483   bool isAddrMode5FP16() const {
1484     // If we have an immediate that's not a constant, treat it as a label
1485     // reference needing a fixup. If it is a constant, it's something else
1486     // and we reject it.
1487     if (isImm() && !isa<MCConstantExpr>(getImm()))
1488       return true;
1489     if (!isMem() || Memory.Alignment != 0) return false;
1490     // Check for register offset.
1491     if (Memory.OffsetRegNum) return false;
1492     // Immediate offset in range [-510, 510] and a multiple of 2.
1493     if (!Memory.OffsetImm) return true;
1494     int64_t Val = Memory.OffsetImm->getValue();
1495     return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1496            Val == std::numeric_limits<int32_t>::min();
1497   }
1498 
1499   bool isMemTBB() const {
1500     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1501         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1502       return false;
1503     return true;
1504   }
1505 
1506   bool isMemTBH() const {
1507     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1508         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1509         Memory.Alignment != 0 )
1510       return false;
1511     return true;
1512   }
1513 
1514   bool isMemRegOffset() const {
1515     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1516       return false;
1517     return true;
1518   }
1519 
1520   bool isT2MemRegOffset() const {
1521     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1522         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1523       return false;
1524     // Only lsl #{0, 1, 2, 3} allowed.
1525     if (Memory.ShiftType == ARM_AM::no_shift)
1526       return true;
1527     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1528       return false;
1529     return true;
1530   }
1531 
1532   bool isMemThumbRR() const {
1533     // Thumb reg+reg addressing is simple. Just two registers, a base and
1534     // an offset. No shifts, negations or any other complicating factors.
1535     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1536         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1537       return false;
1538     return isARMLowRegister(Memory.BaseRegNum) &&
1539       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1540   }
1541 
1542   bool isMemThumbRIs4() const {
1543     if (!isMem() || Memory.OffsetRegNum != 0 ||
1544         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1545       return false;
1546     // Immediate offset, multiple of 4 in range [0, 124].
1547     if (!Memory.OffsetImm) return true;
1548     int64_t Val = Memory.OffsetImm->getValue();
1549     return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1550   }
1551 
1552   bool isMemThumbRIs2() const {
1553     if (!isMem() || Memory.OffsetRegNum != 0 ||
1554         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1555       return false;
1556     // Immediate offset, multiple of 2 in range [0, 62].
1557     if (!Memory.OffsetImm) return true;
1558     int64_t Val = Memory.OffsetImm->getValue();
1559     return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1560   }
1561 
1562   bool isMemThumbRIs1() const {
1563     if (!isMem() || Memory.OffsetRegNum != 0 ||
1564         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1565       return false;
1566     // Immediate offset in range [0, 31].
1567     if (!Memory.OffsetImm) return true;
1568     int64_t Val = Memory.OffsetImm->getValue();
1569     return Val >= 0 && Val <= 31;
1570   }
1571 
1572   bool isMemThumbSPI() const {
1573     if (!isMem() || Memory.OffsetRegNum != 0 ||
1574         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1575       return false;
1576     // Immediate offset, multiple of 4 in range [0, 1020].
1577     if (!Memory.OffsetImm) return true;
1578     int64_t Val = Memory.OffsetImm->getValue();
1579     return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1580   }
1581 
1582   bool isMemImm8s4Offset() const {
1583     // If we have an immediate that's not a constant, treat it as a label
1584     // reference needing a fixup. If it is a constant, it's something else
1585     // and we reject it.
1586     if (isImm() && !isa<MCConstantExpr>(getImm()))
1587       return true;
1588     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1589       return false;
1590     // Immediate offset a multiple of 4 in range [-1020, 1020].
1591     if (!Memory.OffsetImm) return true;
1592     int64_t Val = Memory.OffsetImm->getValue();
1593     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1594     return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1595            Val == std::numeric_limits<int32_t>::min();
1596   }
1597   bool isMemImm7s4Offset() const {
1598     // If we have an immediate that's not a constant, treat it as a label
1599     // reference needing a fixup. If it is a constant, it's something else
1600     // and we reject it.
1601     if (isImm() && !isa<MCConstantExpr>(getImm()))
1602       return true;
1603     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1604         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1605             Memory.BaseRegNum))
1606       return false;
1607     // Immediate offset a multiple of 4 in range [-508, 508].
1608     if (!Memory.OffsetImm) return true;
1609     int64_t Val = Memory.OffsetImm->getValue();
1610     // Special case, #-0 is INT32_MIN.
1611     return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
1612   }
1613   bool isMemImm0_1020s4Offset() const {
1614     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1615       return false;
1616     // Immediate offset a multiple of 4 in range [0, 1020].
1617     if (!Memory.OffsetImm) return true;
1618     int64_t Val = Memory.OffsetImm->getValue();
1619     return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1620   }
1621 
1622   bool isMemImm8Offset() const {
1623     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1624       return false;
1625     // Base reg of PC isn't allowed for these encodings.
1626     if (Memory.BaseRegNum == ARM::PC) return false;
1627     // Immediate offset in range [-255, 255].
1628     if (!Memory.OffsetImm) return true;
1629     int64_t Val = Memory.OffsetImm->getValue();
1630     return (Val == std::numeric_limits<int32_t>::min()) ||
1631            (Val > -256 && Val < 256);
1632   }
1633 
1634   bool isMemPosImm8Offset() const {
1635     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1636       return false;
1637     // Immediate offset in range [0, 255].
1638     if (!Memory.OffsetImm) return true;
1639     int64_t Val = Memory.OffsetImm->getValue();
1640     return Val >= 0 && Val < 256;
1641   }
1642 
1643   bool isMemNegImm8Offset() const {
1644     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1645       return false;
1646     // Base reg of PC isn't allowed for these encodings.
1647     if (Memory.BaseRegNum == ARM::PC) return false;
1648     // Immediate offset in range [-255, -1].
1649     if (!Memory.OffsetImm) return false;
1650     int64_t Val = Memory.OffsetImm->getValue();
1651     return (Val == std::numeric_limits<int32_t>::min()) ||
1652            (Val > -256 && Val < 0);
1653   }
1654 
1655   bool isMemUImm12Offset() const {
1656     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1657       return false;
1658     // Immediate offset in range [0, 4095].
1659     if (!Memory.OffsetImm) return true;
1660     int64_t Val = Memory.OffsetImm->getValue();
1661     return (Val >= 0 && Val < 4096);
1662   }
1663 
1664   bool isMemImm12Offset() const {
1665     // If we have an immediate that's not a constant, treat it as a label
1666     // reference needing a fixup. If it is a constant, it's something else
1667     // and we reject it.
1668 
1669     if (isImm() && !isa<MCConstantExpr>(getImm()))
1670       return true;
1671 
1672     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1673       return false;
1674     // Immediate offset in range [-4095, 4095].
1675     if (!Memory.OffsetImm) return true;
1676     int64_t Val = Memory.OffsetImm->getValue();
1677     return (Val > -4096 && Val < 4096) ||
1678            (Val == std::numeric_limits<int32_t>::min());
1679   }
1680 
1681   bool isConstPoolAsmImm() const {
1682     // Delay processing of the constant pool immediate; this will turn into
1683     // a constant. Match no other operand.
1684     return (isConstantPoolImm());
1685   }
1686 
1687   bool isPostIdxImm8() const {
1688     if (!isImm()) return false;
1689     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1690     if (!CE) return false;
1691     int64_t Val = CE->getValue();
1692     return (Val > -256 && Val < 256) ||
1693            (Val == std::numeric_limits<int32_t>::min());
1694   }
1695 
1696   bool isPostIdxImm8s4() const {
1697     if (!isImm()) return false;
1698     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1699     if (!CE) return false;
1700     int64_t Val = CE->getValue();
1701     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1702            (Val == std::numeric_limits<int32_t>::min());
1703   }
1704 
1705   bool isMSRMask() const { return Kind == k_MSRMask; }
1706   bool isBankedReg() const { return Kind == k_BankedReg; }
1707   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1708 
1709   // NEON operands.
1710   bool isSingleSpacedVectorList() const {
1711     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1712   }
1713 
1714   bool isDoubleSpacedVectorList() const {
1715     return Kind == k_VectorList && VectorList.isDoubleSpaced;
1716   }
1717 
1718   bool isVecListOneD() const {
1719     if (!isSingleSpacedVectorList()) return false;
1720     return VectorList.Count == 1;
1721   }
1722 
1723   bool isVecListDPair() const {
1724     if (!isSingleSpacedVectorList()) return false;
1725     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1726               .contains(VectorList.RegNum));
1727   }
1728 
1729   bool isVecListThreeD() const {
1730     if (!isSingleSpacedVectorList()) return false;
1731     return VectorList.Count == 3;
1732   }
1733 
1734   bool isVecListFourD() const {
1735     if (!isSingleSpacedVectorList()) return false;
1736     return VectorList.Count == 4;
1737   }
1738 
1739   bool isVecListDPairSpaced() const {
1740     if (Kind != k_VectorList) return false;
1741     if (isSingleSpacedVectorList()) return false;
1742     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1743               .contains(VectorList.RegNum));
1744   }
1745 
1746   bool isVecListThreeQ() const {
1747     if (!isDoubleSpacedVectorList()) return false;
1748     return VectorList.Count == 3;
1749   }
1750 
1751   bool isVecListFourQ() const {
1752     if (!isDoubleSpacedVectorList()) return false;
1753     return VectorList.Count == 4;
1754   }
1755 
1756   bool isSingleSpacedVectorAllLanes() const {
1757     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1758   }
1759 
1760   bool isDoubleSpacedVectorAllLanes() const {
1761     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1762   }
1763 
1764   bool isVecListOneDAllLanes() const {
1765     if (!isSingleSpacedVectorAllLanes()) return false;
1766     return VectorList.Count == 1;
1767   }
1768 
1769   bool isVecListDPairAllLanes() const {
1770     if (!isSingleSpacedVectorAllLanes()) return false;
1771     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1772               .contains(VectorList.RegNum));
1773   }
1774 
1775   bool isVecListDPairSpacedAllLanes() const {
1776     if (!isDoubleSpacedVectorAllLanes()) return false;
1777     return VectorList.Count == 2;
1778   }
1779 
1780   bool isVecListThreeDAllLanes() const {
1781     if (!isSingleSpacedVectorAllLanes()) return false;
1782     return VectorList.Count == 3;
1783   }
1784 
1785   bool isVecListThreeQAllLanes() const {
1786     if (!isDoubleSpacedVectorAllLanes()) return false;
1787     return VectorList.Count == 3;
1788   }
1789 
1790   bool isVecListFourDAllLanes() const {
1791     if (!isSingleSpacedVectorAllLanes()) return false;
1792     return VectorList.Count == 4;
1793   }
1794 
1795   bool isVecListFourQAllLanes() const {
1796     if (!isDoubleSpacedVectorAllLanes()) return false;
1797     return VectorList.Count == 4;
1798   }
1799 
1800   bool isSingleSpacedVectorIndexed() const {
1801     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1802   }
1803 
1804   bool isDoubleSpacedVectorIndexed() const {
1805     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1806   }
1807 
1808   bool isVecListOneDByteIndexed() const {
1809     if (!isSingleSpacedVectorIndexed()) return false;
1810     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1811   }
1812 
1813   bool isVecListOneDHWordIndexed() const {
1814     if (!isSingleSpacedVectorIndexed()) return false;
1815     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1816   }
1817 
1818   bool isVecListOneDWordIndexed() const {
1819     if (!isSingleSpacedVectorIndexed()) return false;
1820     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1821   }
1822 
1823   bool isVecListTwoDByteIndexed() const {
1824     if (!isSingleSpacedVectorIndexed()) return false;
1825     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1826   }
1827 
1828   bool isVecListTwoDHWordIndexed() const {
1829     if (!isSingleSpacedVectorIndexed()) return false;
1830     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1831   }
1832 
1833   bool isVecListTwoQWordIndexed() const {
1834     if (!isDoubleSpacedVectorIndexed()) return false;
1835     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1836   }
1837 
1838   bool isVecListTwoQHWordIndexed() const {
1839     if (!isDoubleSpacedVectorIndexed()) return false;
1840     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1841   }
1842 
1843   bool isVecListTwoDWordIndexed() const {
1844     if (!isSingleSpacedVectorIndexed()) return false;
1845     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1846   }
1847 
1848   bool isVecListThreeDByteIndexed() const {
1849     if (!isSingleSpacedVectorIndexed()) return false;
1850     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1851   }
1852 
1853   bool isVecListThreeDHWordIndexed() const {
1854     if (!isSingleSpacedVectorIndexed()) return false;
1855     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1856   }
1857 
1858   bool isVecListThreeQWordIndexed() const {
1859     if (!isDoubleSpacedVectorIndexed()) return false;
1860     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1861   }
1862 
1863   bool isVecListThreeQHWordIndexed() const {
1864     if (!isDoubleSpacedVectorIndexed()) return false;
1865     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1866   }
1867 
1868   bool isVecListThreeDWordIndexed() const {
1869     if (!isSingleSpacedVectorIndexed()) return false;
1870     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1871   }
1872 
1873   bool isVecListFourDByteIndexed() const {
1874     if (!isSingleSpacedVectorIndexed()) return false;
1875     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1876   }
1877 
1878   bool isVecListFourDHWordIndexed() const {
1879     if (!isSingleSpacedVectorIndexed()) return false;
1880     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1881   }
1882 
1883   bool isVecListFourQWordIndexed() const {
1884     if (!isDoubleSpacedVectorIndexed()) return false;
1885     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1886   }
1887 
1888   bool isVecListFourQHWordIndexed() const {
1889     if (!isDoubleSpacedVectorIndexed()) return false;
1890     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1891   }
1892 
1893   bool isVecListFourDWordIndexed() const {
1894     if (!isSingleSpacedVectorIndexed()) return false;
1895     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1896   }
1897 
1898   bool isVectorIndex() const { return Kind == k_VectorIndex; }
1899 
1900   bool isVectorIndex8() const {
1901     if (Kind != k_VectorIndex) return false;
1902     return VectorIndex.Val < 8;
1903   }
1904 
1905   bool isVectorIndex16() const {
1906     if (Kind != k_VectorIndex) return false;
1907     return VectorIndex.Val < 4;
1908   }
1909 
1910   bool isVectorIndex32() const {
1911     if (Kind != k_VectorIndex) return false;
1912     return VectorIndex.Val < 2;
1913   }
1914   bool isVectorIndex64() const {
1915     if (Kind != k_VectorIndex) return false;
1916     return VectorIndex.Val < 1;
1917   }
1918 
1919   bool isNEONi8splat() const {
1920     if (!isImm()) return false;
1921     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1922     // Must be a constant.
1923     if (!CE) return false;
1924     int64_t Value = CE->getValue();
1925     // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1926     // value.
1927     return Value >= 0 && Value < 256;
1928   }
1929 
1930   bool isNEONi16splat() const {
1931     if (isNEONByteReplicate(2))
1932       return false; // Leave that to the byte-replicate case; reject it here.
1933     if (!isImm())
1934       return false;
1935     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1936     // Must be a constant.
1937     if (!CE) return false;
1938     unsigned Value = CE->getValue();
1939     return ARM_AM::isNEONi16splat(Value);
1940   }
1941 
1942   bool isNEONi16splatNot() const {
1943     if (!isImm())
1944       return false;
1945     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1946     // Must be a constant.
1947     if (!CE) return false;
1948     unsigned Value = CE->getValue();
1949     return ARM_AM::isNEONi16splat(~Value & 0xffff);
1950   }
1951 
1952   bool isNEONi32splat() const {
1953     if (isNEONByteReplicate(4))
1954       return false; // Leave that to the byte-replicate case; reject it here.
1955     if (!isImm())
1956       return false;
1957     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1958     // Must be a constant.
1959     if (!CE) return false;
1960     unsigned Value = CE->getValue();
1961     return ARM_AM::isNEONi32splat(Value);
1962   }
1963 
1964   bool isNEONi32splatNot() const {
1965     if (!isImm())
1966       return false;
1967     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1968     // Must be a constant.
1969     if (!CE) return false;
1970     unsigned Value = CE->getValue();
1971     return ARM_AM::isNEONi32splat(~Value);
1972   }
1973 
1974   static bool isValidNEONi32vmovImm(int64_t Value) {
1975     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1976     // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1977     return ((Value & 0xffffffffffffff00) == 0) ||
1978            ((Value & 0xffffffffffff00ff) == 0) ||
1979            ((Value & 0xffffffffff00ffff) == 0) ||
1980            ((Value & 0xffffffff00ffffff) == 0) ||
1981            ((Value & 0xffffffffffff00ff) == 0xff) ||
1982            ((Value & 0xffffffffff00ffff) == 0xffff);
1983   }
1984 
1985   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
1986     assert((Width == 8 || Width == 16 || Width == 32) &&
1987            "Invalid element width");
1988     assert(NumElems * Width <= 64 && "Invalid result width");
1989 
1990     if (!isImm())
1991       return false;
1992     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1993     // Must be a constant.
1994     if (!CE)
1995       return false;
1996     int64_t Value = CE->getValue();
1997     if (!Value)
1998       return false; // Don't bother with zero.
1999     if (Inv)
2000       Value = ~Value;
2001 
2002     uint64_t Mask = (1ull << Width) - 1;
2003     uint64_t Elem = Value & Mask;
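         // For 16-bit elements, at most one of the two bytes may be non-zero.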
2004     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2005       return false;
2006     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2007       return false;
2008 
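         // Every remaining element must repeat the first element exactly.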
2009     for (unsigned i = 1; i < NumElems; ++i) {
2010       Value >>= Width;
2011       if ((Value & Mask) != Elem)
2012         return false;
2013     }
2014     return true;
2015   }
2016 
2017   bool isNEONByteReplicate(unsigned NumBytes) const {
2018     return isNEONReplicate(8, NumBytes, false);
2019   }
2020 
2021   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2022     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2023            "Invalid source width");
2024     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2025            "Invalid destination width");
2026     assert(FromW < ToW && "FromW must be less than ToW");
2027   }
2028 
2029   template<unsigned FromW, unsigned ToW>
2030   bool isNEONmovReplicate() const {
2031     checkNeonReplicateArgs(FromW, ToW);
2032     if (ToW == 64 && isNEONi64splat())
2033       return false;
2034     return isNEONReplicate(FromW, ToW / FromW, false);
2035   }
2036 
2037   template<unsigned FromW, unsigned ToW>
2038   bool isNEONinvReplicate() const {
2039     checkNeonReplicateArgs(FromW, ToW);
2040     return isNEONReplicate(FromW, ToW / FromW, true);
2041   }
2042 
2043   bool isNEONi32vmov() const {
2044     if (isNEONByteReplicate(4))
2045       return false; // Let it be classified as the byte-replicate case.
2046     if (!isImm())
2047       return false;
2048     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2049     // Must be a constant.
2050     if (!CE)
2051       return false;
2052     return isValidNEONi32vmovImm(CE->getValue());
2053   }
2054 
2055   bool isNEONi32vmovNeg() const {
2056     if (!isImm()) return false;
2057     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2058     // Must be a constant.
2059     if (!CE) return false;
2060     return isValidNEONi32vmovImm(~CE->getValue());
2061   }
2062 
2063   bool isNEONi64splat() const {
2064     if (!isImm()) return false;
2065     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2066     // Must be a constant.
2067     if (!CE) return false;
2068     uint64_t Value = CE->getValue();
2069     // i64 value with each byte being either 0 or 0xff.
2070     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2071       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2072     return true;
2073   }
2074 
2075   template<int64_t Angle, int64_t Remainder>
2076   bool isComplexRotation() const {
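         // Accepts multiples of Angle offset by Remainder, up to 270 (e.g. with
         // Angle = 90 and Remainder = 0 this matches 0, 90, 180 and 270).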
2077     if (!isImm()) return false;
2078 
2079     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2080     if (!CE) return false;
2081     uint64_t Value = CE->getValue();
2082 
2083     return (Value % Angle == Remainder && Value <= 270);
2084   }
2085 
2086   bool isMVELongShift() const {
2087     if (!isImm()) return false;
2088     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2089     // Must be a constant.
2090     if (!CE) return false;
2091     uint64_t Value = CE->getValue();
2092     return Value >= 1 && Value <= 32;
2093   }
2094 
2095   bool isITCondCodeNoAL() const {
2096     if (!isITCondCode()) return false;
2097     ARMCC::CondCodes CC = getCondCode();
2098     return CC != ARMCC::AL;
2099   }
2100 
2101   bool isITCondCodeRestrictedI() const {
2102     if (!isITCondCode())
2103       return false;
2104     ARMCC::CondCodes CC = getCondCode();
2105     return CC == ARMCC::EQ || CC == ARMCC::NE;
2106   }
2107 
2108   bool isITCondCodeRestrictedS() const {
2109     if (!isITCondCode())
2110       return false;
2111     ARMCC::CondCodes CC = getCondCode();
2112     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2113            CC == ARMCC::GE;
2114   }
2115 
2116   bool isITCondCodeRestrictedU() const {
2117     if (!isITCondCode())
2118       return false;
2119     ARMCC::CondCodes CC = getCondCode();
2120     return CC == ARMCC::HS || CC == ARMCC::HI;
2121   }
2122 
2123   bool isITCondCodeRestrictedFP() const {
2124     if (!isITCondCode())
2125       return false;
2126     ARMCC::CondCodes CC = getCondCode();
2127     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2128            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2129   }
2130 
2131   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2132     // Add as immediates when possible.  Null MCExpr = 0.
2133     if (!Expr)
2134       Inst.addOperand(MCOperand::createImm(0));
2135     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2136       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2137     else
2138       Inst.addOperand(MCOperand::createExpr(Expr));
2139   }
2140 
2141   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2142     assert(N == 1 && "Invalid number of operands!");
2143     addExpr(Inst, getImm());
2144   }
2145 
2146   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2147     assert(N == 1 && "Invalid number of operands!");
2148     addExpr(Inst, getImm());
2149   }
2150 
2151   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2152     assert(N == 2 && "Invalid number of operands!");
2153     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
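         // AL is unpredicated and has no CPSR use; every other condition code
         // reads CPSR.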
2154     unsigned RegNum = getCondCode() == ARMCC::AL ? 0 : ARM::CPSR;
2155     Inst.addOperand(MCOperand::createReg(RegNum));
2156   }
2157 
2158   void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2159     assert(N == 2 && "Invalid number of operands!");
2160     Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2161     unsigned RegNum = getVPTPred() == ARMVCC::None ? 0 : ARM::P0;
2162     Inst.addOperand(MCOperand::createReg(RegNum));
2163   }
2164 
2165   void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2166     assert(N == 3 && "Invalid number of operands!");
2167     addVPTPredNOperands(Inst, N-1);
2168     unsigned RegNum;
2169     if (getVPTPred() == ARMVCC::None) {
2170       RegNum = 0;
2171     } else {
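           // The register supplying the inactive lanes is tied to the
           // instruction's output; find it through the TIED_TO constraint.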
2172       unsigned NextOpIndex = Inst.getNumOperands();
2173       const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
2174       int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2175       assert(TiedOp >= 0 &&
2176              "Inactive register in vpred_r is not tied to an output!");
2177       RegNum = Inst.getOperand(TiedOp).getReg();
2178     }
2179     Inst.addOperand(MCOperand::createReg(RegNum));
2180   }
2181 
2182   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2183     assert(N == 1 && "Invalid number of operands!");
2184     Inst.addOperand(MCOperand::createImm(getCoproc()));
2185   }
2186 
2187   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2188     assert(N == 1 && "Invalid number of operands!");
2189     Inst.addOperand(MCOperand::createImm(getCoproc()));
2190   }
2191 
2192   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2193     assert(N == 1 && "Invalid number of operands!");
2194     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2195   }
2196 
2197   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2198     assert(N == 1 && "Invalid number of operands!");
2199     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2200   }
2201 
2202   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2203     assert(N == 1 && "Invalid number of operands!");
2204     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2205   }
2206 
2207   void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2208     assert(N == 1 && "Invalid number of operands!");
2209     Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2210   }
2211 
2212   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2213     assert(N == 1 && "Invalid number of operands!");
2214     Inst.addOperand(MCOperand::createReg(getReg()));
2215   }
2216 
2217   void addRegOperands(MCInst &Inst, unsigned N) const {
2218     assert(N == 1 && "Invalid number of operands!");
2219     Inst.addOperand(MCOperand::createReg(getReg()));
2220   }
2221 
2222   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2223     assert(N == 3 && "Invalid number of operands!");
2224     assert(isRegShiftedReg() &&
2225            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2226     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2227     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2228     Inst.addOperand(MCOperand::createImm(
2229       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2230   }
2231 
2232   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2233     assert(N == 2 && "Invalid number of operands!");
2234     assert(isRegShiftedImm() &&
2235            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2236     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2237     // Shift of #32 is encoded as 0 where permitted
2238     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2239     Inst.addOperand(MCOperand::createImm(
2240       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2241   }
2242 
2243   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2244     assert(N == 1 && "Invalid number of operands!");
2245     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2246                                          ShifterImm.Imm));
2247   }
2248 
2249   void addRegListOperands(MCInst &Inst, unsigned N) const {
2250     assert(N == 1 && "Invalid number of operands!");
2251     const SmallVectorImpl<unsigned> &RegList = getRegList();
2252     for (SmallVectorImpl<unsigned>::const_iterator
2253            I = RegList.begin(), E = RegList.end(); I != E; ++I)
2254       Inst.addOperand(MCOperand::createReg(*I));
2255   }
2256 
2257   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2258     assert(N == 1 && "Invalid number of operands!");
2259     const SmallVectorImpl<unsigned> &RegList = getRegList();
2260     for (SmallVectorImpl<unsigned>::const_iterator
2261            I = RegList.begin(), E = RegList.end(); I != E; ++I)
2262       Inst.addOperand(MCOperand::createReg(*I));
2263   }
2264 
2265   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2266     addRegListOperands(Inst, N);
2267   }
2268 
2269   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2270     addRegListOperands(Inst, N);
2271   }
2272 
2273   void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2274     addRegListOperands(Inst, N);
2275   }
2276 
2277   void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2278     addRegListOperands(Inst, N);
2279   }
2280 
2281   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2282     assert(N == 1 && "Invalid number of operands!");
2283     // Encoded as val>>3. The printer handles display as 8, 16, 24.
2284     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2285   }
2286 
2287   void addModImmOperands(MCInst &Inst, unsigned N) const {
2288     assert(N == 1 && "Invalid number of operands!");
2289 
2290     // Support for fixups (MCFixup)
2291     if (isImm())
2292       return addImmOperands(Inst, N);
2293 
2294     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2295   }
2296 
2297   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2298     assert(N == 1 && "Invalid number of operands!");
2299     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2300     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2301     Inst.addOperand(MCOperand::createImm(Enc));
2302   }
2303 
2304   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2305     assert(N == 1 && "Invalid number of operands!");
2306     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2307     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2308     Inst.addOperand(MCOperand::createImm(Enc));
2309   }
2310 
2311   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2312     assert(N == 1 && "Invalid number of operands!");
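         // The assembly operand is negative; the MCInst stores its magnitude.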
2313     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2314     uint32_t Val = -CE->getValue();
2315     Inst.addOperand(MCOperand::createImm(Val));
2316   }
2317 
2318   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2319     assert(N == 1 && "Invalid number of operands!");
2320     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2321     uint32_t Val = -CE->getValue();
2322     Inst.addOperand(MCOperand::createImm(Val));
2323   }
2324 
2325   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2326     assert(N == 1 && "Invalid number of operands!");
2327     // Munge the lsb/width into a bitfield mask.
2328     unsigned lsb = Bitfield.LSB;
2329     unsigned width = Bitfield.Width;
2330     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
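         // For example, lsb = 8 and width = 8 produce the mask 0xffff00ff.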
2331     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2332                       (32 - (lsb + width)));
2333     Inst.addOperand(MCOperand::createImm(Mask));
2334   }
2335 
2336   void addImmOperands(MCInst &Inst, unsigned N) const {
2337     assert(N == 1 && "Invalid number of operands!");
2338     addExpr(Inst, getImm());
2339   }
2340 
2341   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2342     assert(N == 1 && "Invalid number of operands!");
2343     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2344     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2345   }
2346 
2347   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2348     assert(N == 1 && "Invalid number of operands!");
2349     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2350     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2351   }
2352 
2353   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2354     assert(N == 1 && "Invalid number of operands!");
2355     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2356     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2357     Inst.addOperand(MCOperand::createImm(Val));
2358   }
2359 
2360   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2361     assert(N == 1 && "Invalid number of operands!");
2362     // FIXME: We really want to scale the value here, but the LDRD/STRD
2363     // instructions don't encode operands that way yet.
2364     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2365     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2366   }
2367 
2368   void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2369     assert(N == 1 && "Invalid number of operands!");
2370     // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2371     // instructions don't encode operands that way yet.
2372     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2373     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2374   }
2375 
2376   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2377     assert(N == 1 && "Invalid number of operands!");
2378     // The immediate is scaled by four in the encoding and is stored
2379     // in the MCInst as such. Lop off the low two bits here.
2380     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2381     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2382   }
2383 
2384   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2385     assert(N == 1 && "Invalid number of operands!");
2386     // The immediate is scaled by four in the encoding and is stored
2387     // in the MCInst as such. Lop off the low two bits here.
2388     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2389     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2390   }
2391 
2392   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2393     assert(N == 1 && "Invalid number of operands!");
2394     // The immediate is scaled by four in the encoding and is stored
2395     // in the MCInst as such. Lop off the low two bits here.
2396     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2397     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2398   }
2399 
2400   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2401     assert(N == 1 && "Invalid number of operands!");
2402     // The constant encodes as the immediate-1, and we store in the instruction
2403     // the bits as encoded, so subtract off one here.
2404     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2405     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2406   }
2407 
2408   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2409     assert(N == 1 && "Invalid number of operands!");
2410     // The constant encodes as the immediate-1, and we store in the instruction
2411     // the bits as encoded, so subtract off one here.
2412     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2413     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2414   }
2415 
2416   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2417     assert(N == 1 && "Invalid number of operands!");
2418     // The constant encodes as the immediate, except for 32, which encodes as
2419     // zero.
2420     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2421     unsigned Imm = CE->getValue();
2422     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2423   }
2424 
2425   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2426     assert(N == 1 && "Invalid number of operands!");
2427     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2428     // the instruction as well.
2429     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2430     int Val = CE->getValue();
2431     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2432   }
2433 
2434   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2435     assert(N == 1 && "Invalid number of operands!");
2436     // The operand is actually a t2_so_imm, but we have its bitwise
2437     // negation in the assembly source, so twiddle it here.
2438     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2439     Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2440   }
2441 
2442   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2443     assert(N == 1 && "Invalid number of operands!");
2444     // The operand is actually a t2_so_imm, but we have its
2445     // negation in the assembly source, so twiddle it here.
2446     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2447     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2448   }
2449 
2450   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2451     assert(N == 1 && "Invalid number of operands!");
2452     // The operand is actually an imm0_4095, but we have its
2453     // negation in the assembly source, so twiddle it here.
2454     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2455     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2456   }
2457 
2458   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
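         // A constant offset is stored scaled down by 4; its two low bits are
         // not encoded.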
2459     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2460       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2461       return;
2462     }
2463 
2464     const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2465     assert(SR && "Unknown value type!");
2466     Inst.addOperand(MCOperand::createExpr(SR));
2467   }
2468 
2469   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2470     assert(N == 1 && "Invalid number of operands!");
2471     if (isImm()) {
2472       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2473       if (CE) {
2474         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2475         return;
2476       }
2477 
2478       const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2479 
2480       assert(SR && "Unknown value type!");
2481       Inst.addOperand(MCOperand::createExpr(SR));
2482       return;
2483     }
2484 
2485     assert(isMem() && "Unknown value type!");
2486     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2487     Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2488   }
2489 
2490   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2491     assert(N == 1 && "Invalid number of operands!");
2492     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2493   }
2494 
2495   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2496     assert(N == 1 && "Invalid number of operands!");
2497     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2498   }
2499 
2500   void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2501     assert(N == 1 && "Invalid number of operands!");
2502     Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2503   }
2504 
2505   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2506     assert(N == 1 && "Invalid number of operands!");
2507     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2508   }
2509 
2510   void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2511     assert(N == 1 && "Invalid number of operands!");
2512     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2513   }
2514 
2515   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2516     assert(N == 1 && "Invalid number of operands!");
2517     int32_t Imm = Memory.OffsetImm->getValue();
2518     Inst.addOperand(MCOperand::createImm(Imm));
2519   }
2520 
2521   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2522     assert(N == 1 && "Invalid number of operands!");
2523     assert(isImm() && "Not an immediate!");
2524 
2525     // If we have an immediate that's not a constant, treat it as a label
2526     // reference needing a fixup.
2527     if (!isa<MCConstantExpr>(getImm())) {
2528       Inst.addOperand(MCOperand::createExpr(getImm()));
2529       return;
2530     }
2531 
2532     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2533     int Val = CE->getValue();
2534     Inst.addOperand(MCOperand::createImm(Val));
2535   }
2536 
2537   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2538     assert(N == 2 && "Invalid number of operands!");
2539     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2540     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2541   }
2542 
2543   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2544     addAlignedMemoryOperands(Inst, N);
2545   }
2546 
2547   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2548     addAlignedMemoryOperands(Inst, N);
2549   }
2550 
2551   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2552     addAlignedMemoryOperands(Inst, N);
2553   }
2554 
2555   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2556     addAlignedMemoryOperands(Inst, N);
2557   }
2558 
2559   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2560     addAlignedMemoryOperands(Inst, N);
2561   }
2562 
2563   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2564     addAlignedMemoryOperands(Inst, N);
2565   }
2566 
2567   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2568     addAlignedMemoryOperands(Inst, N);
2569   }
2570 
2571   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2572     addAlignedMemoryOperands(Inst, N);
2573   }
2574 
2575   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2576     addAlignedMemoryOperands(Inst, N);
2577   }
2578 
2579   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2580     addAlignedMemoryOperands(Inst, N);
2581   }
2582 
2583   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2584     addAlignedMemoryOperands(Inst, N);
2585   }
2586 
2587   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2588     assert(N == 3 && "Invalid number of operands!");
2589     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2590     if (!Memory.OffsetRegNum) {
2591       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2592       // Special case for #-0
2593       if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2594       if (Val < 0) Val = -Val;
2595       Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2596     } else {
2597       // For register offset, we encode the shift type and negation flag
2598       // here.
2599       Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2600                               Memory.ShiftImm, Memory.ShiftType);
2601     }
2602     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2603     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2604     Inst.addOperand(MCOperand::createImm(Val));
2605   }
2606 
2607   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2608     assert(N == 2 && "Invalid number of operands!");
2609     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2610     assert(CE && "non-constant AM2OffsetImm operand!");
2611     int32_t Val = CE->getValue();
2612     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2613     // Special case for #-0
2614     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2615     if (Val < 0) Val = -Val;
2616     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2617     Inst.addOperand(MCOperand::createReg(0));
2618     Inst.addOperand(MCOperand::createImm(Val));
2619   }
2620 
2621   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2622     assert(N == 3 && "Invalid number of operands!");
2623     // If we have an immediate that's not a constant, treat it as a label
2624     // reference needing a fixup. If it is a constant, it's something else
2625     // and we reject it.
2626     if (isImm()) {
2627       Inst.addOperand(MCOperand::createExpr(getImm()));
2628       Inst.addOperand(MCOperand::createReg(0));
2629       Inst.addOperand(MCOperand::createImm(0));
2630       return;
2631     }
2632 
2633     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2634     if (!Memory.OffsetRegNum) {
2635       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2636       // Special case for #-0
2637       if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2638       if (Val < 0) Val = -Val;
2639       Val = ARM_AM::getAM3Opc(AddSub, Val);
2640     } else {
2641       // For register offset, we encode the shift type and negation flag
2642       // here.
2643       Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2644     }
2645     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2646     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2647     Inst.addOperand(MCOperand::createImm(Val));
2648   }
2649 
2650   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2651     assert(N == 2 && "Invalid number of operands!");
2652     if (Kind == k_PostIndexRegister) {
2653       int32_t Val =
2654         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2655       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2656       Inst.addOperand(MCOperand::createImm(Val));
2657       return;
2658     }
2659 
2660     // Constant offset.
2661     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2662     int32_t Val = CE->getValue();
2663     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2664     // Special case for #-0
2665     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2666     if (Val < 0) Val = -Val;
2667     Val = ARM_AM::getAM3Opc(AddSub, Val);
2668     Inst.addOperand(MCOperand::createReg(0));
2669     Inst.addOperand(MCOperand::createImm(Val));
2670   }
2671 
2672   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2673     assert(N == 2 && "Invalid number of operands!");
2674     // If we have an immediate that's not a constant, treat it as a label
2675     // reference needing a fixup. If it is a constant, it's something else
2676     // and we reject it.
2677     if (isImm()) {
2678       Inst.addOperand(MCOperand::createExpr(getImm()));
2679       Inst.addOperand(MCOperand::createImm(0));
2680       return;
2681     }
2682 
2683     // The lower two bits are always zero and as such are not encoded.
2684     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2685     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2686     // Special case for #-0
2687     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2688     if (Val < 0) Val = -Val;
2689     Val = ARM_AM::getAM5Opc(AddSub, Val);
2690     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2691     Inst.addOperand(MCOperand::createImm(Val));
2692   }
2693 
2694   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2695     assert(N == 2 && "Invalid number of operands!");
2696     // If we have an immediate that's not a constant, treat it as a label
2697     // reference needing a fixup. If it is a constant, it's something else
2698     // and we reject it.
2699     if (isImm()) {
2700       Inst.addOperand(MCOperand::createExpr(getImm()));
2701       Inst.addOperand(MCOperand::createImm(0));
2702       return;
2703     }
2704 
2705     // The lower bit is always zero and as such is not encoded.
2706     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2707     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2708     // Special case for #-0
2709     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2710     if (Val < 0) Val = -Val;
2711     Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2712     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2713     Inst.addOperand(MCOperand::createImm(Val));
2714   }
2715 
2716   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2717     assert(N == 2 && "Invalid number of operands!");
2718     // If we have an immediate that's not a constant, treat it as a label
2719     // reference needing a fixup. If it is a constant, it's something else
2720     // and we reject it.
2721     if (isImm()) {
2722       Inst.addOperand(MCOperand::createExpr(getImm()));
2723       Inst.addOperand(MCOperand::createImm(0));
2724       return;
2725     }
2726 
2727     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2728     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2729     Inst.addOperand(MCOperand::createImm(Val));
2730   }
2731 
2732   void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
2733     assert(N == 2 && "Invalid number of operands!");
2734     // If we have an immediate that's not a constant, treat it as a label
2735     // reference needing a fixup. If it is a constant, it's something else
2736     // and we reject it.
2737     if (isImm()) {
2738       Inst.addOperand(MCOperand::createExpr(getImm()));
2739       Inst.addOperand(MCOperand::createImm(0));
2740       return;
2741     }
2742 
2743     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2744     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2745     Inst.addOperand(MCOperand::createImm(Val));
2746   }
2747 
2748   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2749     assert(N == 2 && "Invalid number of operands!");
2750     // The lower two bits are always zero and as such are not encoded.
2751     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2752     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2753     Inst.addOperand(MCOperand::createImm(Val));
2754   }
2755 
2756   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2757     assert(N == 2 && "Invalid number of operands!");
2758     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2759     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2760     Inst.addOperand(MCOperand::createImm(Val));
2761   }
2762 
2763   void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2764     addMemImm8OffsetOperands(Inst, N);
2765   }
2766 
2767   void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2768     addMemImm8OffsetOperands(Inst, N);
2769   }
2770 
2771   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2772     assert(N == 2 && "Invalid number of operands!");
2773     // If this is an immediate, it's a label reference.
2774     if (isImm()) {
2775       addExpr(Inst, getImm());
2776       Inst.addOperand(MCOperand::createImm(0));
2777       return;
2778     }
2779 
2780     // Otherwise, it's a normal memory reg+offset.
2781     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2782     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2783     Inst.addOperand(MCOperand::createImm(Val));
2784   }
2785 
2786   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2787     assert(N == 2 && "Invalid number of operands!");
2788     // If this is an immediate, it's a label reference.
2789     if (isImm()) {
2790       addExpr(Inst, getImm());
2791       Inst.addOperand(MCOperand::createImm(0));
2792       return;
2793     }
2794 
2795     // Otherwise, it's a normal memory reg+offset.
2796     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2797     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2798     Inst.addOperand(MCOperand::createImm(Val));
2799   }
2800 
2801   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2802     assert(N == 1 && "Invalid number of operands!");
2803     // This is a container for the immediate that we will create the constant
2804     // pool from.
2805     addExpr(Inst, getConstantPoolImm());
2806     return;
2807   }
2808 
2809   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2810     assert(N == 2 && "Invalid number of operands!");
2811     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2812     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2813   }
2814 
2815   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2816     assert(N == 2 && "Invalid number of operands!");
2817     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2818     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2819   }
2820 
2821   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2822     assert(N == 3 && "Invalid number of operands!");
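         // Fold the add/sub flag, shift amount and shift type into a single
         // AM2 opcode immediate.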
2823     unsigned Val =
2824       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2825                         Memory.ShiftImm, Memory.ShiftType);
2826     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2827     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2828     Inst.addOperand(MCOperand::createImm(Val));
2829   }
2830 
2831   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2832     assert(N == 3 && "Invalid number of operands!");
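         // Thumb2 register offsets only allow an LSL shift, so just the shift
         // amount is encoded here.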
2833     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2834     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2835     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2836   }
2837 
2838   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2839     assert(N == 2 && "Invalid number of operands!");
2840     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2841     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2842   }
2843 
2844   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2845     assert(N == 2 && "Invalid number of operands!");
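         // The offset is stored scaled by the 4-byte access size.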
2846     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2847     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2848     Inst.addOperand(MCOperand::createImm(Val));
2849   }
2850 
2851   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2852     assert(N == 2 && "Invalid number of operands!");
2853     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2854     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2855     Inst.addOperand(MCOperand::createImm(Val));
2856   }
2857 
2858   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2859     assert(N == 2 && "Invalid number of operands!");
2860     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2861     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2862     Inst.addOperand(MCOperand::createImm(Val));
2863   }
2864 
2865   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2866     assert(N == 2 && "Invalid number of operands!");
2867     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2868     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2869     Inst.addOperand(MCOperand::createImm(Val));
2870   }
2871 
2872   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2873     assert(N == 1 && "Invalid number of operands!");
2874     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2875     assert(CE && "non-constant post-idx-imm8 operand!");
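         // Encode the magnitude in bits [7:0] with the add/sub flag in bit 8;
         // #-0 (std::numeric_limits<int32_t>::min()) becomes a subtraction of
         // zero.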
2876     int Imm = CE->getValue();
2877     bool isAdd = Imm >= 0;
2878     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2879     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2880     Inst.addOperand(MCOperand::createImm(Imm));
2881   }
2882 
2883   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2884     assert(N == 1 && "Invalid number of operands!");
2885     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2886     assert(CE && "non-constant post-idx-imm8s4 operand!");
2887     int Imm = CE->getValue();
2888     bool isAdd = Imm >= 0;
2889     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2890     // Immediate is scaled by 4.
2891     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2892     Inst.addOperand(MCOperand::createImm(Imm));
2893   }
2894 
2895   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2896     assert(N == 2 && "Invalid number of operands!");
2897     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2898     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2899   }
2900 
2901   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2902     assert(N == 2 && "Invalid number of operands!");
2903     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2904     // The sign, shift type, and shift amount are encoded in a single operand
2905     // using the AM2 encoding helpers.
2906     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2907     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2908                                      PostIdxReg.ShiftTy);
2909     Inst.addOperand(MCOperand::createImm(Imm));
2910   }
2911 
2912   void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
2913     assert(N == 1 && "Invalid number of operands!");
2914     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2915     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2916   }
2917 
2918   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2919     assert(N == 1 && "Invalid number of operands!");
2920     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2921   }
2922 
2923   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2924     assert(N == 1 && "Invalid number of operands!");
2925     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2926   }
2927 
2928   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2929     assert(N == 1 && "Invalid number of operands!");
2930     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2931   }
2932 
2933   void addVecListOperands(MCInst &Inst, unsigned N) const {
2934     assert(N == 1 && "Invalid number of operands!");
2935     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2936   }
2937 
2938   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2939     assert(N == 2 && "Invalid number of operands!");
2940     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2941     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2942   }
2943 
2944   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2945     assert(N == 1 && "Invalid number of operands!");
2946     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2947   }
2948 
2949   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2950     assert(N == 1 && "Invalid number of operands!");
2951     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2952   }
2953 
2954   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2955     assert(N == 1 && "Invalid number of operands!");
2956     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2957   }
2958 
2959   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
2960     assert(N == 1 && "Invalid number of operands!");
2961     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2962   }
2963 
2964   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2965     assert(N == 1 && "Invalid number of operands!");
2966     // The immediate encodes the type of constant as well as the value.
2967     // Mask in that this is an i8 splat.
2968     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2969     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2970   }
2971 
2972   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2973     assert(N == 1 && "Invalid number of operands!");
2974     // The immediate encodes the type of constant as well as the value.
2975     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2976     unsigned Value = CE->getValue();
2977     Value = ARM_AM::encodeNEONi16splat(Value);
2978     Inst.addOperand(MCOperand::createImm(Value));
2979   }
2980 
2981   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2982     assert(N == 1 && "Invalid number of operands!");
2983     // The immediate encodes the type of constant as well as the value.
2984     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2985     unsigned Value = CE->getValue();
2986     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2987     Inst.addOperand(MCOperand::createImm(Value));
2988   }
2989 
2990   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2991     assert(N == 1 && "Invalid number of operands!");
2992     // The immediate encodes the type of constant as well as the value.
2993     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2994     unsigned Value = CE->getValue();
2995     Value = ARM_AM::encodeNEONi32splat(Value);
2996     Inst.addOperand(MCOperand::createImm(Value));
2997   }
2998 
2999   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3000     assert(N == 1 && "Invalid number of operands!");
3001     // The immediate encodes the type of constant as well as the value.
3002     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3003     unsigned Value = CE->getValue();
3004     Value = ARM_AM::encodeNEONi32splat(~Value);
3005     Inst.addOperand(MCOperand::createImm(Value));
3006   }
3007 
3008   void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3009     // The immediate encodes the type of constant as well as the value.
3010     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3011     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3012             Inst.getOpcode() == ARM::VMOVv16i8) &&
3013           "All instructions that want to replicate a non-zero byte "
3014           "must be replaced with VMOVv8i8 or VMOVv16i8.");
3015     unsigned Value = CE->getValue();
3016     if (Inv)
3017       Value = ~Value;
3018     unsigned B = Value & 0xff;
3019     B |= 0xe00; // cmode = 0b1110
3020     Inst.addOperand(MCOperand::createImm(B));
3021   }
3022 
3023   void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3024     assert(N == 1 && "Invalid number of operands!");
3025     addNEONi8ReplicateOperands(Inst, true);
3026   }
3027 
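  // Fold a 32-bit value into the combined imm8+cmode form used for the NEON
  // modified-immediate (VMOV) encoding: the payload byte is shifted down into
  // the low 8 bits, and the higher bits select the cmode, i.e. which byte of
  // the element holds the payload and whether the other bytes are zeros or
  // ones. The value is assumed to have already been validated elsewhere.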
3028   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3029     if (Value >= 256 && Value <= 0xffff)
3030       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3031     else if (Value > 0xffff && Value <= 0xffffff)
3032       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3033     else if (Value > 0xffffff)
3034       Value = (Value >> 24) | 0x600;
3035     return Value;
3036   }
3037 
3038   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3039     assert(N == 1 && "Invalid number of operands!");
3040     // The immediate encodes the type of constant as well as the value.
3041     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3042     unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3043     Inst.addOperand(MCOperand::createImm(Value));
3044   }
3045 
3046   void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3047     assert(N == 1 && "Invalid number of operands!");
3048     addNEONi8ReplicateOperands(Inst, false);
3049   }
3050 
3051   void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3052     assert(N == 1 && "Invalid number of operands!");
3053     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3054     assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3055             Inst.getOpcode() == ARM::VMOVv8i16 ||
3056             Inst.getOpcode() == ARM::VMVNv4i16 ||
3057             Inst.getOpcode() == ARM::VMVNv8i16) &&
3058           "All instructions that want to replicate a non-zero half-word "
3059           "must be replaced with V{MOV,MVN}v{4,8}i16.");
3060     uint64_t Value = CE->getValue();
3061     unsigned Elem = Value & 0xffff;
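    // A payload byte in the upper half of the 16-bit element is shifted down,
    // with the 0x200 bit recording that it belongs in the upper byte.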
3062     if (Elem >= 256)
3063       Elem = (Elem >> 8) | 0x200;
3064     Inst.addOperand(MCOperand::createImm(Elem));
3065   }
3066 
3067   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3068     assert(N == 1 && "Invalid number of operands!");
3069     // The immediate encodes the type of constant as well as the value.
3070     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3071     unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3072     Inst.addOperand(MCOperand::createImm(Value));
3073   }
3074 
3075   void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3076     assert(N == 1 && "Invalid number of operands!");
3077     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3078     assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3079             Inst.getOpcode() == ARM::VMOVv4i32 ||
3080             Inst.getOpcode() == ARM::VMVNv2i32 ||
3081             Inst.getOpcode() == ARM::VMVNv4i32) &&
3082           "All instructions that want to replicate a non-zero word "
3083           "must be replaced with V{MOV,MVN}v{2,4}i32.");
3084     uint64_t Value = CE->getValue();
3085     unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3086     Inst.addOperand(MCOperand::createImm(Elem));
3087   }
3088 
3089   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3090     assert(N == 1 && "Invalid number of operands!");
3091     // The immediate encodes the type of constant as well as the value.
3092     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3093     uint64_t Value = CE->getValue();
3094     unsigned Imm = 0;
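    // For a valid i64 splat immediate every byte of the value is 0x00 or 0xff,
    // so collapsing each byte to its low bit yields the 8-bit byte-mask
    // pattern; 0x1e00 selects the 64-bit byte-mask form.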
3095     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3096       Imm |= (Value & 1) << i;
3097     }
3098     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3099   }
3100 
3101   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3102     assert(N == 1 && "Invalid number of operands!");
3103     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
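    // Encode the rotation (0/90/180/270 degrees) as rotation / 90.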
3104     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3105   }
3106 
3107   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3108     assert(N == 1 && "Invalid number of operands!");
3109     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
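    // Encode the rotation (90 or 270 degrees) as (rotation - 90) / 180,
    // i.e. 0 or 1.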
3110     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3111   }
3112 
3113   void print(raw_ostream &OS) const override;
3114 
3115   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3116     auto Op = make_unique<ARMOperand>(k_ITCondMask);
3117     Op->ITMask.Mask = Mask;
3118     Op->StartLoc = S;
3119     Op->EndLoc = S;
3120     return Op;
3121   }
3122 
3123   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3124                                                     SMLoc S) {
3125     auto Op = make_unique<ARMOperand>(k_CondCode);
3126     Op->CC.Val = CC;
3127     Op->StartLoc = S;
3128     Op->EndLoc = S;
3129     return Op;
3130   }
3131 
3132   static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3133                                                    SMLoc S) {
3134     auto Op = make_unique<ARMOperand>(k_VPTPred);
3135     Op->VCC.Val = CC;
3136     Op->StartLoc = S;
3137     Op->EndLoc = S;
3138     return Op;
3139   }
3140 
3141   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3142     auto Op = make_unique<ARMOperand>(k_CoprocNum);
3143     Op->Cop.Val = CopVal;
3144     Op->StartLoc = S;
3145     Op->EndLoc = S;
3146     return Op;
3147   }
3148 
3149   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3150     auto Op = make_unique<ARMOperand>(k_CoprocReg);
3151     Op->Cop.Val = CopVal;
3152     Op->StartLoc = S;
3153     Op->EndLoc = S;
3154     return Op;
3155   }
3156 
3157   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3158                                                         SMLoc E) {
3159     auto Op = make_unique<ARMOperand>(k_CoprocOption);
3160     Op->Cop.Val = Val;
3161     Op->StartLoc = S;
3162     Op->EndLoc = E;
3163     return Op;
3164   }
3165 
3166   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3167     auto Op = make_unique<ARMOperand>(k_CCOut);
3168     Op->Reg.RegNum = RegNum;
3169     Op->StartLoc = S;
3170     Op->EndLoc = S;
3171     return Op;
3172   }
3173 
3174   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3175     auto Op = make_unique<ARMOperand>(k_Token);
3176     Op->Tok.Data = Str.data();
3177     Op->Tok.Length = Str.size();
3178     Op->StartLoc = S;
3179     Op->EndLoc = S;
3180     return Op;
3181   }
3182 
3183   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3184                                                SMLoc E) {
3185     auto Op = make_unique<ARMOperand>(k_Register);
3186     Op->Reg.RegNum = RegNum;
3187     Op->StartLoc = S;
3188     Op->EndLoc = E;
3189     return Op;
3190   }
3191 
3192   static std::unique_ptr<ARMOperand>
3193   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3194                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3195                         SMLoc E) {
3196     auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
3197     Op->RegShiftedReg.ShiftTy = ShTy;
3198     Op->RegShiftedReg.SrcReg = SrcReg;
3199     Op->RegShiftedReg.ShiftReg = ShiftReg;
3200     Op->RegShiftedReg.ShiftImm = ShiftImm;
3201     Op->StartLoc = S;
3202     Op->EndLoc = E;
3203     return Op;
3204   }
3205 
3206   static std::unique_ptr<ARMOperand>
3207   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3208                          unsigned ShiftImm, SMLoc S, SMLoc E) {
3209     auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
3210     Op->RegShiftedImm.ShiftTy = ShTy;
3211     Op->RegShiftedImm.SrcReg = SrcReg;
3212     Op->RegShiftedImm.ShiftImm = ShiftImm;
3213     Op->StartLoc = S;
3214     Op->EndLoc = E;
3215     return Op;
3216   }
3217 
3218   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3219                                                       SMLoc S, SMLoc E) {
3220     auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
3221     Op->ShifterImm.isASR = isASR;
3222     Op->ShifterImm.Imm = Imm;
3223     Op->StartLoc = S;
3224     Op->EndLoc = E;
3225     return Op;
3226   }
3227 
3228   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3229                                                   SMLoc E) {
3230     auto Op = make_unique<ARMOperand>(k_RotateImmediate);
3231     Op->RotImm.Imm = Imm;
3232     Op->StartLoc = S;
3233     Op->EndLoc = E;
3234     return Op;
3235   }
3236 
3237   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3238                                                   SMLoc S, SMLoc E) {
3239     auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
3240     Op->ModImm.Bits = Bits;
3241     Op->ModImm.Rot = Rot;
3242     Op->StartLoc = S;
3243     Op->EndLoc = E;
3244     return Op;
3245   }
3246 
3247   static std::unique_ptr<ARMOperand>
3248   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3249     auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
3250     Op->Imm.Val = Val;
3251     Op->StartLoc = S;
3252     Op->EndLoc = E;
3253     return Op;
3254   }
3255 
3256   static std::unique_ptr<ARMOperand>
3257   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3258     auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
3259     Op->Bitfield.LSB = LSB;
3260     Op->Bitfield.Width = Width;
3261     Op->StartLoc = S;
3262     Op->EndLoc = E;
3263     return Op;
3264   }
3265 
3266   static std::unique_ptr<ARMOperand>
3267   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3268                 SMLoc StartLoc, SMLoc EndLoc) {
3269     assert(Regs.size() > 0 && "RegList contains no registers?");
3270     KindTy Kind = k_RegisterList;
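    // Pick the list kind from the class of the first register; a trailing VPR
    // turns a D- or S-register list into the corresponding ...WithVPR kind.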
3271 
3272     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3273             Regs.front().second)) {
3274       if (Regs.back().second == ARM::VPR)
3275         Kind = k_FPDRegisterListWithVPR;
3276       else
3277         Kind = k_DPRRegisterList;
3278     } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3279                    Regs.front().second)) {
3280       if (Regs.back().second == ARM::VPR)
3281         Kind = k_FPSRegisterListWithVPR;
3282       else
3283         Kind = k_SPRRegisterList;
3284     }
3285 
3286     // Sort based on the register encoding values.
3287     array_pod_sort(Regs.begin(), Regs.end());
3288 
3289     if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3290       Kind = k_RegisterListWithAPSR;
3291 
3292     auto Op = make_unique<ARMOperand>(Kind);
3293     for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
3294            I = Regs.begin(), E = Regs.end(); I != E; ++I)
3295       Op->Registers.push_back(I->second);
3296 
3297     Op->StartLoc = StartLoc;
3298     Op->EndLoc = EndLoc;
3299     return Op;
3300   }
3301 
3302   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3303                                                       unsigned Count,
3304                                                       bool isDoubleSpaced,
3305                                                       SMLoc S, SMLoc E) {
3306     auto Op = make_unique<ARMOperand>(k_VectorList);
3307     Op->VectorList.RegNum = RegNum;
3308     Op->VectorList.Count = Count;
3309     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3310     Op->StartLoc = S;
3311     Op->EndLoc = E;
3312     return Op;
3313   }
3314 
3315   static std::unique_ptr<ARMOperand>
3316   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3317                            SMLoc S, SMLoc E) {
3318     auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
3319     Op->VectorList.RegNum = RegNum;
3320     Op->VectorList.Count = Count;
3321     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3322     Op->StartLoc = S;
3323     Op->EndLoc = E;
3324     return Op;
3325   }
3326 
3327   static std::unique_ptr<ARMOperand>
3328   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3329                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
3330     auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
3331     Op->VectorList.RegNum = RegNum;
3332     Op->VectorList.Count = Count;
3333     Op->VectorList.LaneIndex = Index;
3334     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3335     Op->StartLoc = S;
3336     Op->EndLoc = E;
3337     return Op;
3338   }
3339 
3340   static std::unique_ptr<ARMOperand>
3341   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3342     auto Op = make_unique<ARMOperand>(k_VectorIndex);
3343     Op->VectorIndex.Val = Idx;
3344     Op->StartLoc = S;
3345     Op->EndLoc = E;
3346     return Op;
3347   }
3348 
3349   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3350                                                SMLoc E) {
3351     auto Op = make_unique<ARMOperand>(k_Immediate);
3352     Op->Imm.Val = Val;
3353     Op->StartLoc = S;
3354     Op->EndLoc = E;
3355     return Op;
3356   }
3357 
3358   static std::unique_ptr<ARMOperand>
3359   CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3360             unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3361             unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3362             SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3363     auto Op = make_unique<ARMOperand>(k_Memory);
3364     Op->Memory.BaseRegNum = BaseRegNum;
3365     Op->Memory.OffsetImm = OffsetImm;
3366     Op->Memory.OffsetRegNum = OffsetRegNum;
3367     Op->Memory.ShiftType = ShiftType;
3368     Op->Memory.ShiftImm = ShiftImm;
3369     Op->Memory.Alignment = Alignment;
3370     Op->Memory.isNegative = isNegative;
3371     Op->StartLoc = S;
3372     Op->EndLoc = E;
3373     Op->AlignmentLoc = AlignmentLoc;
3374     return Op;
3375   }
3376 
3377   static std::unique_ptr<ARMOperand>
3378   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3379                    unsigned ShiftImm, SMLoc S, SMLoc E) {
3380     auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
3381     Op->PostIdxReg.RegNum = RegNum;
3382     Op->PostIdxReg.isAdd = isAdd;
3383     Op->PostIdxReg.ShiftTy = ShiftTy;
3384     Op->PostIdxReg.ShiftImm = ShiftImm;
3385     Op->StartLoc = S;
3386     Op->EndLoc = E;
3387     return Op;
3388   }
3389 
3390   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3391                                                          SMLoc S) {
3392     auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3393     Op->MBOpt.Val = Opt;
3394     Op->StartLoc = S;
3395     Op->EndLoc = S;
3396     return Op;
3397   }
3398 
3399   static std::unique_ptr<ARMOperand>
3400   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3401     auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3402     Op->ISBOpt.Val = Opt;
3403     Op->StartLoc = S;
3404     Op->EndLoc = S;
3405     return Op;
3406   }
3407 
3408   static std::unique_ptr<ARMOperand>
3409   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3410     auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3411     Op->TSBOpt.Val = Opt;
3412     Op->StartLoc = S;
3413     Op->EndLoc = S;
3414     return Op;
3415   }
3416 
3417   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3418                                                       SMLoc S) {
3419     auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3420     Op->IFlags.Val = IFlags;
3421     Op->StartLoc = S;
3422     Op->EndLoc = S;
3423     return Op;
3424   }
3425 
3426   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3427     auto Op = make_unique<ARMOperand>(k_MSRMask);
3428     Op->MMask.Val = MMask;
3429     Op->StartLoc = S;
3430     Op->EndLoc = S;
3431     return Op;
3432   }
3433 
3434   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3435     auto Op = make_unique<ARMOperand>(k_BankedReg);
3436     Op->BankedReg.Val = Reg;
3437     Op->StartLoc = S;
3438     Op->EndLoc = S;
3439     return Op;
3440   }
3441 };
3442 
3443 } // end anonymous namespace.
3444 
3445 void ARMOperand::print(raw_ostream &OS) const {
3446   auto RegName = [](unsigned Reg) {
3447     if (Reg)
3448       return ARMInstPrinter::getRegisterName(Reg);
3449     else
3450       return "noreg";
3451   };
3452 
3453   switch (Kind) {
3454   case k_CondCode:
3455     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3456     break;
3457   case k_VPTPred:
3458     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3459     break;
3460   case k_CCOut:
3461     OS << "<ccout " << RegName(getReg()) << ">";
3462     break;
3463   case k_ITCondMask: {
3464     static const char *const MaskStr[] = {
3465       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3466       "(tt)",      "(ttet)", "(tte)", "(ttee)",
3467       "(t)",       "(tett)", "(tet)", "(tete)",
3468       "(te)",      "(teet)", "(tee)", "(teee)",
3469     };
3470     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3471     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3472     break;
3473   }
3474   case k_CoprocNum:
3475     OS << "<coprocessor number: " << getCoproc() << ">";
3476     break;
3477   case k_CoprocReg:
3478     OS << "<coprocessor register: " << getCoproc() << ">";
3479     break;
3480   case k_CoprocOption:
3481     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3482     break;
3483   case k_MSRMask:
3484     OS << "<mask: " << getMSRMask() << ">";
3485     break;
3486   case k_BankedReg:
3487     OS << "<banked reg: " << getBankedReg() << ">";
3488     break;
3489   case k_Immediate:
3490     OS << *getImm();
3491     break;
3492   case k_MemBarrierOpt:
3493     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3494     break;
3495   case k_InstSyncBarrierOpt:
3496     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3497     break;
3498   case k_TraceSyncBarrierOpt:
3499     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3500     break;
3501   case k_Memory:
3502     OS << "<memory";
3503     if (Memory.BaseRegNum)
3504       OS << " base:" << RegName(Memory.BaseRegNum);
3505     if (Memory.OffsetImm)
3506       OS << " offset-imm:" << *Memory.OffsetImm;
3507     if (Memory.OffsetRegNum)
3508       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3509          << RegName(Memory.OffsetRegNum);
3510     if (Memory.ShiftType != ARM_AM::no_shift) {
3511       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3512       OS << " shift-imm:" << Memory.ShiftImm;
3513     }
3514     if (Memory.Alignment)
3515       OS << " alignment:" << Memory.Alignment;
3516     OS << ">";
3517     break;
3518   case k_PostIndexRegister:
3519     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3520        << RegName(PostIdxReg.RegNum);
3521     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3522       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3523          << PostIdxReg.ShiftImm;
3524     OS << ">";
3525     break;
3526   case k_ProcIFlags: {
3527     OS << "<ARM_PROC::";
3528     unsigned IFlags = getProcIFlags();
3529     for (int i = 2; i >= 0; --i)
3530       if (IFlags & (1 << i))
3531         OS << ARM_PROC::IFlagsToString(1 << i);
3532     OS << ">";
3533     break;
3534   }
3535   case k_Register:
3536     OS << "<register " << RegName(getReg()) << ">";
3537     break;
3538   case k_ShifterImmediate:
3539     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3540        << " #" << ShifterImm.Imm << ">";
3541     break;
3542   case k_ShiftedRegister:
3543     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3544        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3545        << RegName(RegShiftedReg.ShiftReg) << ">";
3546     break;
3547   case k_ShiftedImmediate:
3548     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3549        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3550        << RegShiftedImm.ShiftImm << ">";
3551     break;
3552   case k_RotateImmediate:
3553     OS << "<ror #" << (RotImm.Imm * 8) << ">";
3554     break;
3555   case k_ModifiedImmediate:
3556     OS << "<mod_imm #" << ModImm.Bits << ", #"
3557        << ModImm.Rot << ">";
3558     break;
3559   case k_ConstantPoolImmediate:
3560     OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
3561     break;
3562   case k_BitfieldDescriptor:
3563     OS << "<bitfield " << "lsb: " << Bitfield.LSB
3564        << ", width: " << Bitfield.Width << ">";
3565     break;
3566   case k_RegisterList:
3567   case k_RegisterListWithAPSR:
3568   case k_DPRRegisterList:
3569   case k_SPRRegisterList:
3570   case k_FPSRegisterListWithVPR:
3571   case k_FPDRegisterListWithVPR: {
3572     OS << "<register_list ";
3573 
3574     const SmallVectorImpl<unsigned> &RegList = getRegList();
3575     for (SmallVectorImpl<unsigned>::const_iterator
3576            I = RegList.begin(), E = RegList.end(); I != E; ) {
3577       OS << RegName(*I);
3578       if (++I < E) OS << ", ";
3579     }
3580 
3581     OS << ">";
3582     break;
3583   }
3584   case k_VectorList:
3585     OS << "<vector_list " << VectorList.Count << " * "
3586        << RegName(VectorList.RegNum) << ">";
3587     break;
3588   case k_VectorListAllLanes:
3589     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3590        << RegName(VectorList.RegNum) << ">";
3591     break;
3592   case k_VectorListIndexed:
3593     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3594        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
3595     break;
3596   case k_Token:
3597     OS << "'" << getToken() << "'";
3598     break;
3599   case k_VectorIndex:
3600     OS << "<vectorindex " << getVectorIndex() << ">";
3601     break;
3602   }
3603 }
3604 
3605 /// @name Auto-generated Match Functions
3606 /// {
3607 
3608 static unsigned MatchRegisterName(StringRef Name);
3609 
3610 /// }
3611 
3612 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3613                                  SMLoc &StartLoc, SMLoc &EndLoc) {
3614   const AsmToken &Tok = getParser().getTok();
3615   StartLoc = Tok.getLoc();
3616   EndLoc = Tok.getEndLoc();
3617   RegNo = tryParseRegister();
3618 
3619   return (RegNo == (unsigned)-1);
3620 }
3621 
3622 /// Try to parse a register name.  The token must be an Identifier when called,
3623 /// and if it is a register name the token is eaten and the register number is
3624 /// returned.  Otherwise return -1.
3625 int ARMAsmParser::tryParseRegister() {
3626   MCAsmParser &Parser = getParser();
3627   const AsmToken &Tok = Parser.getTok();
3628   if (Tok.isNot(AsmToken::Identifier)) return -1;
3629 
3630   std::string lowerCase = Tok.getString().lower();
3631   unsigned RegNum = MatchRegisterName(lowerCase);
3632   if (!RegNum) {
3633     RegNum = StringSwitch<unsigned>(lowerCase)
3634       .Case("r13", ARM::SP)
3635       .Case("r14", ARM::LR)
3636       .Case("r15", ARM::PC)
3637       .Case("ip", ARM::R12)
3638       // Additional register name aliases for 'gas' compatibility.
3639       .Case("a1", ARM::R0)
3640       .Case("a2", ARM::R1)
3641       .Case("a3", ARM::R2)
3642       .Case("a4", ARM::R3)
3643       .Case("v1", ARM::R4)
3644       .Case("v2", ARM::R5)
3645       .Case("v3", ARM::R6)
3646       .Case("v4", ARM::R7)
3647       .Case("v5", ARM::R8)
3648       .Case("v6", ARM::R9)
3649       .Case("v7", ARM::R10)
3650       .Case("v8", ARM::R11)
3651       .Case("sb", ARM::R9)
3652       .Case("sl", ARM::R10)
3653       .Case("fp", ARM::R11)
3654       .Default(0);
3655   }
3656   if (!RegNum) {
3657     // Check for aliases registered via .req. Canonicalize to lower case.
3658     // That's more consistent since register names are case insensitive, and
3659     // it's how the original entry was passed in from MC/MCParser/AsmParser.
3660     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3661     // If no match, return failure.
3662     if (Entry == RegisterReqs.end())
3663       return -1;
3664     Parser.Lex(); // Eat identifier token.
3665     return Entry->getValue();
3666   }
3667 
3668   // Some FPUs only have 16 D registers, so D16-D31 are invalid
3669   if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3670     return -1;
3671 
3672   Parser.Lex(); // Eat identifier token.
3673 
3674   return RegNum;
3675 }
3676 
3677 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3678 // If a recoverable error occurs, return 1. If an irrecoverable error
3679 // occurs, return -1. An irrecoverable error is one where tokens have been
3680 // consumed in the process of trying to parse the shifter (i.e., when it is
3681 // indeed a shifter operand, but malformed).
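// For example, for "mov r0, r1, lsl #2" the caller has already pushed the r1
// register operand; this routine pops it and pushes a single shifted-register
// operand (r1, lsl, #2) in its place.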
3682 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3683   MCAsmParser &Parser = getParser();
3684   SMLoc S = Parser.getTok().getLoc();
3685   const AsmToken &Tok = Parser.getTok();
3686   if (Tok.isNot(AsmToken::Identifier))
3687     return -1;
3688 
3689   std::string lowerCase = Tok.getString().lower();
3690   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
3691       .Case("asl", ARM_AM::lsl)
3692       .Case("lsl", ARM_AM::lsl)
3693       .Case("lsr", ARM_AM::lsr)
3694       .Case("asr", ARM_AM::asr)
3695       .Case("ror", ARM_AM::ror)
3696       .Case("rrx", ARM_AM::rrx)
3697       .Default(ARM_AM::no_shift);
3698 
3699   if (ShiftTy == ARM_AM::no_shift)
3700     return 1;
3701 
3702   Parser.Lex(); // Eat the operator.
3703 
3704   // The source register for the shift has already been added to the
3705   // operand list, so we need to pop it off and combine it into the shifted
3706   // register operand instead.
3707   std::unique_ptr<ARMOperand> PrevOp(
3708       (ARMOperand *)Operands.pop_back_val().release());
3709   if (!PrevOp->isReg())
3710     return Error(PrevOp->getStartLoc(), "shift must be of a register");
3711   int SrcReg = PrevOp->getReg();
3712 
3713   SMLoc EndLoc;
3714   int64_t Imm = 0;
3715   int ShiftReg = 0;
3716   if (ShiftTy == ARM_AM::rrx) {
3717     // RRX doesn't have an explicit shift amount. The encoder expects
3718     // the shift register to be the same as the source register. Seems odd,
3719     // but OK.
3720     ShiftReg = SrcReg;
3721   } else {
3722     // Figure out if this is shifted by a constant or a register (for non-RRX).
3723     if (Parser.getTok().is(AsmToken::Hash) ||
3724         Parser.getTok().is(AsmToken::Dollar)) {
3725       Parser.Lex(); // Eat hash.
3726       SMLoc ImmLoc = Parser.getTok().getLoc();
3727       const MCExpr *ShiftExpr = nullptr;
3728       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3729         Error(ImmLoc, "invalid immediate shift value");
3730         return -1;
3731       }
3732       // The expression must be evaluatable as an immediate.
3733       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3734       if (!CE) {
3735         Error(ImmLoc, "invalid immediate shift value");
3736         return -1;
3737       }
3738       // Range check the immediate.
3739       // lsl, ror: 0 <= imm <= 31
3740       // lsr, asr: 0 <= imm <= 32
3741       Imm = CE->getValue();
3742       if (Imm < 0 ||
3743           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3744           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3745         Error(ImmLoc, "immediate shift value out of range");
3746         return -1;
3747       }
3748       // shift by zero is a nop. Always send it through as lsl.
3749       // ('as' compatibility)
3750       if (Imm == 0)
3751         ShiftTy = ARM_AM::lsl;
3752     } else if (Parser.getTok().is(AsmToken::Identifier)) {
3753       SMLoc L = Parser.getTok().getLoc();
3754       EndLoc = Parser.getTok().getEndLoc();
3755       ShiftReg = tryParseRegister();
3756       if (ShiftReg == -1) {
3757         Error(L, "expected immediate or register in shift operand");
3758         return -1;
3759       }
3760     } else {
3761       Error(Parser.getTok().getLoc(),
3762             "expected immediate or register in shift operand");
3763       return -1;
3764     }
3765   }
3766 
3767   if (ShiftReg && ShiftTy != ARM_AM::rrx)
3768     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3769                                                          ShiftReg, Imm,
3770                                                          S, EndLoc));
3771   else
3772     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3773                                                           S, EndLoc));
3774 
3775   return 0;
3776 }
3777 
3778 /// Try to parse a register name.  The token must be an Identifier when called.
3779 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3780 /// if there is a "writeback". Returns 'true' if it's not a register.
3781 ///
3782 /// TODO this is likely to change to allow different register types and/or to
3783 /// parse for a specific register type.
3784 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3785   MCAsmParser &Parser = getParser();
3786   SMLoc RegStartLoc = Parser.getTok().getLoc();
3787   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3788   int RegNo = tryParseRegister();
3789   if (RegNo == -1)
3790     return true;
3791 
3792   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3793 
3794   const AsmToken &ExclaimTok = Parser.getTok();
3795   if (ExclaimTok.is(AsmToken::Exclaim)) {
3796     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3797                                                ExclaimTok.getLoc()));
3798     Parser.Lex(); // Eat exclaim token
3799     return false;
3800   }
3801 
3802   // Also check for an index operand. This is only legal for vector registers,
3803   // but that'll get caught OK in operand matching, so we don't need to
3804   // explicitly filter everything else out here.
3805   if (Parser.getTok().is(AsmToken::LBrac)) {
3806     SMLoc SIdx = Parser.getTok().getLoc();
3807     Parser.Lex(); // Eat left bracket token.
3808 
3809     const MCExpr *ImmVal;
3810     if (getParser().parseExpression(ImmVal))
3811       return true;
3812     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3813     if (!MCE)
3814       return TokError("immediate value expected for vector index");
3815 
3816     if (Parser.getTok().isNot(AsmToken::RBrac))
3817       return Error(Parser.getTok().getLoc(), "']' expected");
3818 
3819     SMLoc E = Parser.getTok().getEndLoc();
3820     Parser.Lex(); // Eat right bracket token.
3821 
3822     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3823                                                      SIdx, E,
3824                                                      getContext()));
3825   }
3826 
3827   return false;
3828 }
3829 
3830 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3831 /// instruction with a symbolic operand name.
3832 /// We accept "crN" syntax for GAS compatibility.
3833 /// <operand-name> ::= <prefix><number>
3834 /// If CoprocOp is 'c', then:
3835 ///   <prefix> ::= c | cr
3836 /// If CoprocOp is 'p', then:
3837 ///   <prefix> ::= p
3838 /// <number> ::= integer in range [0, 15]
3839 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3840   // Use the same layout as the tablegen'erated register name matcher. Ugly,
3841   // but efficient.
3842   if (Name.size() < 2 || Name[0] != CoprocOp)
3843     return -1;
3844   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3845 
3846   switch (Name.size()) {
3847   default: return -1;
3848   case 1:
3849     switch (Name[0]) {
3850     default:  return -1;
3851     case '0': return 0;
3852     case '1': return 1;
3853     case '2': return 2;
3854     case '3': return 3;
3855     case '4': return 4;
3856     case '5': return 5;
3857     case '6': return 6;
3858     case '7': return 7;
3859     case '8': return 8;
3860     case '9': return 9;
3861     }
3862   case 2:
3863     if (Name[0] != '1')
3864       return -1;
3865     switch (Name[1]) {
3866     default:  return -1;
3867     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3868     // However, old cores (v5/v6) did use them in that way.
3869     case '0': return 10;
3870     case '1': return 11;
3871     case '2': return 12;
3872     case '3': return 13;
3873     case '4': return 14;
3874     case '5': return 15;
3875     }
3876   }
3877 }
3878 
3879 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3880 OperandMatchResultTy
3881 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3882   MCAsmParser &Parser = getParser();
3883   SMLoc S = Parser.getTok().getLoc();
3884   const AsmToken &Tok = Parser.getTok();
3885   if (!Tok.is(AsmToken::Identifier))
3886     return MatchOperand_NoMatch;
3887   unsigned CC = ARMCondCodeFromString(Tok.getString());
3888   if (CC == ~0U)
3889     return MatchOperand_NoMatch;
3890   Parser.Lex(); // Eat the token.
3891 
3892   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3893 
3894   return MatchOperand_Success;
3895 }
3896 
3897 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3898 /// token must be an Identifier when called, and if it is a coprocessor
3899 /// number, the token is eaten and the operand is added to the operand list.
3900 OperandMatchResultTy
3901 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3902   MCAsmParser &Parser = getParser();
3903   SMLoc S = Parser.getTok().getLoc();
3904   const AsmToken &Tok = Parser.getTok();
3905   if (Tok.isNot(AsmToken::Identifier))
3906     return MatchOperand_NoMatch;
3907 
3908   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
3909   if (Num == -1)
3910     return MatchOperand_NoMatch;
3911   // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3912   if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3913     return MatchOperand_NoMatch;
3914 
3915   Parser.Lex(); // Eat identifier token.
3916   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3917   return MatchOperand_Success;
3918 }
3919 
3920 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3921 /// token must be an Identifier when called, and if it is a coprocessor
3922 /// register, the token is eaten and the operand is added to the operand list.
3923 OperandMatchResultTy
3924 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3925   MCAsmParser &Parser = getParser();
3926   SMLoc S = Parser.getTok().getLoc();
3927   const AsmToken &Tok = Parser.getTok();
3928   if (Tok.isNot(AsmToken::Identifier))
3929     return MatchOperand_NoMatch;
3930 
3931   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
3932   if (Reg == -1)
3933     return MatchOperand_NoMatch;
3934 
3935   Parser.Lex(); // Eat identifier token.
3936   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3937   return MatchOperand_Success;
3938 }
3939 
3940 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3941 /// coproc_option : '{' imm0_255 '}'
3942 OperandMatchResultTy
3943 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3944   MCAsmParser &Parser = getParser();
3945   SMLoc S = Parser.getTok().getLoc();
3946 
3947   // If this isn't a '{', this isn't a coprocessor immediate operand.
3948   if (Parser.getTok().isNot(AsmToken::LCurly))
3949     return MatchOperand_NoMatch;
3950   Parser.Lex(); // Eat the '{'
3951 
3952   const MCExpr *Expr;
3953   SMLoc Loc = Parser.getTok().getLoc();
3954   if (getParser().parseExpression(Expr)) {
3955     Error(Loc, "illegal expression");
3956     return MatchOperand_ParseFail;
3957   }
3958   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3959   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3960     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3961     return MatchOperand_ParseFail;
3962   }
3963   int Val = CE->getValue();
3964 
3965   // Check for and consume the closing '}'
3966   if (Parser.getTok().isNot(AsmToken::RCurly))
3967     return MatchOperand_ParseFail;
3968   SMLoc E = Parser.getTok().getEndLoc();
3969   Parser.Lex(); // Eat the '}'
3970 
3971   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3972   return MatchOperand_Success;
3973 }
3974 
3975 // For register list parsing, we need to map from raw GPR register numbering
3976 // to the enumeration values. The enumeration values aren't sorted by
3977 // register number due to our using "sp", "lr" and "pc" as canonical names.
3978 static unsigned getNextRegister(unsigned Reg) {
3979   // If this is a GPR, we need to do it manually, otherwise we can rely
3980   // on the sort ordering of the enumeration since the other reg-classes
3981   // are sane.
3982   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3983     return Reg + 1;
3984   switch(Reg) {
3985   default: llvm_unreachable("Invalid GPR number!");
3986   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3987   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3988   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3989   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3990   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3991   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3992   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3993   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3994   }
3995 }
3996 
3997 /// Parse a register list.
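/// When EnforceOrder is set, registers that appear out of ascending encoding
/// order are diagnosed (see the checks below); order-independent instructions
/// such as CLRM pass false.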
3998 bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
3999                                      bool EnforceOrder) {
4000   MCAsmParser &Parser = getParser();
4001   if (Parser.getTok().isNot(AsmToken::LCurly))
4002     return TokError("Token is not a Left Curly Brace");
4003   SMLoc S = Parser.getTok().getLoc();
4004   Parser.Lex(); // Eat '{' token.
4005   SMLoc RegLoc = Parser.getTok().getLoc();
4006 
4007   // Check the first register in the list to see what register class
4008   // this is a list of.
4009   int Reg = tryParseRegister();
4010   if (Reg == -1)
4011     return Error(RegLoc, "register expected");
4012 
4013   // The reglist instructions have at most 16 registers, so reserve
4014   // space for that many.
4015   int EReg = 0;
4016   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
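  // Each entry pairs the register's encoding value with the register itself so
  // that CreateRegList can sort the list by encoding.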
4017 
4018   // Allow Q regs and just interpret them as the two D sub-registers.
4019   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4020     Reg = getDRegFromQReg(Reg);
4021     EReg = MRI->getEncodingValue(Reg);
4022     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4023     ++Reg;
4024   }
4025   const MCRegisterClass *RC;
4026   if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4027     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4028   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4029     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4030   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4031     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4032   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4033     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4034   else
4035     return Error(RegLoc, "invalid register in register list");
4036 
4037   // Store the register.
4038   EReg = MRI->getEncodingValue(Reg);
4039   Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4040 
4041   // This starts immediately after the first register token in the list,
4042   // so we can see either a comma or a minus (range separator) as a legal
4043   // next token.
4044   while (Parser.getTok().is(AsmToken::Comma) ||
4045          Parser.getTok().is(AsmToken::Minus)) {
4046     if (Parser.getTok().is(AsmToken::Minus)) {
4047       Parser.Lex(); // Eat the minus.
4048       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4049       int EndReg = tryParseRegister();
4050       if (EndReg == -1)
4051         return Error(AfterMinusLoc, "register expected");
4052       // Allow Q regs and just interpret them as the two D sub-registers.
4053       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4054         EndReg = getDRegFromQReg(EndReg) + 1;
4055       // If the register is the same as the start reg, there's nothing
4056       // more to do.
4057       if (Reg == EndReg)
4058         continue;
4059       // The register must be in the same register class as the first.
4060       if (!RC->contains(EndReg))
4061         return Error(AfterMinusLoc, "invalid register in register list");
4062       // Ranges must go from low to high.
4063       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4064         return Error(AfterMinusLoc, "bad range in register list");
4065 
4066       // Add all the registers in the range to the register list.
4067       while (Reg != EndReg) {
4068         Reg = getNextRegister(Reg);
4069         EReg = MRI->getEncodingValue(Reg);
4070         Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4071       }
4072       continue;
4073     }
4074     Parser.Lex(); // Eat the comma.
4075     RegLoc = Parser.getTok().getLoc();
4076     int OldReg = Reg;
4077     const AsmToken RegTok = Parser.getTok();
4078     Reg = tryParseRegister();
4079     if (Reg == -1)
4080       return Error(RegLoc, "register expected");
4081     // Allow Q regs and just interpret them as the two D sub-registers.
4082     bool isQReg = false;
4083     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4084       Reg = getDRegFromQReg(Reg);
4085       isQReg = true;
4086     }
4087     if (!RC->contains(Reg) &&
4088         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4089         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4090       // Switch the register class: GPRwithAPSRnospRegClassID overlaps
4091       // GPRRegClassID (minus SP) and additionally contains APSR.
4092       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4093     }
4094     if (Reg == ARM::VPR && (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4095                             RC == &ARMMCRegisterClasses[ARM::DPRRegClassID])) {
4096       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4097       EReg = MRI->getEncodingValue(Reg);
4098       Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4099       continue;
4100     }
4101     // The register must be in the same register class as the first.
4102     if (!RC->contains(Reg))
4103       return Error(RegLoc, "invalid register in register list");
4104     // In most cases, the list must be monotonically increasing. An
4105     // exception is CLRM, which is order-independent anyway, so
4106     // there's no potential for confusion if you write clrm {r2,r1}
4107     // instead of clrm {r1,r2}.
4108     if (EnforceOrder &&
4109         MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4110       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4111         Warning(RegLoc, "register list not in ascending order");
4112       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4113         return Error(RegLoc, "register list not in ascending order");
4114     }
4115     if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
4116       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4117               ") in register list");
4118       continue;
4119     }
4120     // VFP register lists must also be contiguous.
4121     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4122         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4123         Reg != OldReg + 1)
4124       return Error(RegLoc, "non-contiguous register range");
4125     EReg = MRI->getEncodingValue(Reg);
4126     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4127     if (isQReg) {
4128       EReg = MRI->getEncodingValue(++Reg);
4129       Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4130     }
4131   }
4132 
4133   if (Parser.getTok().isNot(AsmToken::RCurly))
4134     return Error(Parser.getTok().getLoc(), "'}' expected");
4135   SMLoc E = Parser.getTok().getEndLoc();
4136   Parser.Lex(); // Eat '}' token.
4137 
4138   // Push the register list operand.
4139   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4140 
4141   // The ARM system instruction variants for LDM/STM have a '^' token here.
4142   if (Parser.getTok().is(AsmToken::Caret)) {
4143     Operands.push_back(ARMOperand::CreateToken("^", Parser.getTok().getLoc()));
4144     Parser.Lex(); // Eat '^' token.
4145   }
4146 
4147   return false;
4148 }
4149 
4150 // Helper function to parse the lane index for vector lists.
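// Distinguishes three forms: no '[' at all (NoLanes), "[]" (AllLanes), and
// "[<imm>]" (IndexedLane with the given index).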
4151 OperandMatchResultTy ARMAsmParser::
4152 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4153   MCAsmParser &Parser = getParser();
4154   Index = 0; // Always return a defined index value.
4155   if (Parser.getTok().is(AsmToken::LBrac)) {
4156     Parser.Lex(); // Eat the '['.
4157     if (Parser.getTok().is(AsmToken::RBrac)) {
4158       // "Dn[]" is the 'all lanes' syntax.
4159       LaneKind = AllLanes;
4160       EndLoc = Parser.getTok().getEndLoc();
4161       Parser.Lex(); // Eat the ']'.
4162       return MatchOperand_Success;
4163     }
4164 
4165     // There's an optional '#' token here. Normally there wouldn't be, but
4166     // inline assembly puts one in, and it's friendly to accept that.
4167     if (Parser.getTok().is(AsmToken::Hash))
4168       Parser.Lex(); // Eat the '#'.
4169 
4170     const MCExpr *LaneIndex;
4171     SMLoc Loc = Parser.getTok().getLoc();
4172     if (getParser().parseExpression(LaneIndex)) {
4173       Error(Loc, "illegal expression");
4174       return MatchOperand_ParseFail;
4175     }
4176     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4177     if (!CE) {
4178       Error(Loc, "lane index must be empty or an integer");
4179       return MatchOperand_ParseFail;
4180     }
4181     if (Parser.getTok().isNot(AsmToken::RBrac)) {
4182       Error(Parser.getTok().getLoc(), "']' expected");
4183       return MatchOperand_ParseFail;
4184     }
4185     EndLoc = Parser.getTok().getEndLoc();
4186     Parser.Lex(); // Eat the ']'.
4187     int64_t Val = CE->getValue();
4188 
4189     // FIXME: Make this range check context sensitive for .8, .16, .32.
4190     if (Val < 0 || Val > 7) {
4191       Error(Parser.getTok().getLoc(), "lane index out of range");
4192       return MatchOperand_ParseFail;
4193     }
4194     Index = Val;
4195     LaneKind = IndexedLane;
4196     return MatchOperand_Success;
4197   }
4198   LaneKind = NoLanes;
4199   return MatchOperand_Success;
4200 }
4201 
4202 // parse a vector register list
4203 OperandMatchResultTy
4204 ARMAsmParser::parseVectorList(OperandVector &Operands) {
4205   MCAsmParser &Parser = getParser();
4206   VectorLaneTy LaneKind;
4207   unsigned LaneIndex;
4208   SMLoc S = Parser.getTok().getLoc();
4209   // As an extension (to match gas), support a plain D register or Q register
4210   // (without enclosing curly braces) as a single or double entry list,
4211   // respectively.
4212   if (Parser.getTok().is(AsmToken::Identifier)) {
4213     SMLoc E = Parser.getTok().getEndLoc();
4214     int Reg = tryParseRegister();
4215     if (Reg == -1)
4216       return MatchOperand_NoMatch;
4217     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4218       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4219       if (Res != MatchOperand_Success)
4220         return Res;
4221       switch (LaneKind) {
4222       case NoLanes:
4223         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4224         break;
4225       case AllLanes:
4226         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4227                                                                 S, E));
4228         break;
4229       case IndexedLane:
4230         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4231                                                                LaneIndex,
4232                                                                false, S, E));
4233         break;
4234       }
4235       return MatchOperand_Success;
4236     }
4237     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4238       Reg = getDRegFromQReg(Reg);
4239       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4240       if (Res != MatchOperand_Success)
4241         return Res;
4242       switch (LaneKind) {
4243       case NoLanes:
4244         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4245                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4246         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4247         break;
4248       case AllLanes:
4249         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4250                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4251         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4252                                                                 S, E));
4253         break;
4254       case IndexedLane:
4255         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4256                                                                LaneIndex,
4257                                                                false, S, E));
4258         break;
4259       }
4260       return MatchOperand_Success;
4261     }
4262     Error(S, "vector register expected");
4263     return MatchOperand_ParseFail;
4264   }
4265 
4266   if (Parser.getTok().isNot(AsmToken::LCurly))
4267     return MatchOperand_NoMatch;
4268 
4269   Parser.Lex(); // Eat '{' token.
4270   SMLoc RegLoc = Parser.getTok().getLoc();
4271 
4272   int Reg = tryParseRegister();
4273   if (Reg == -1) {
4274     Error(RegLoc, "register expected");
4275     return MatchOperand_ParseFail;
4276   }
4277   unsigned Count = 1;
4278   int Spacing = 0;
4279   unsigned FirstReg = Reg;
4280   // The list is of D registers, but we also allow Q regs and just interpret
4281   // them as the two D sub-registers.
4282   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4283     FirstReg = Reg = getDRegFromQReg(Reg);
4284     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4285                  // it's ambiguous with four-register single spaced.
4286     ++Reg;
4287     ++Count;
4288   }
4289 
4290   SMLoc E;
4291   if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4292     return MatchOperand_ParseFail;
4293 
4294   while (Parser.getTok().is(AsmToken::Comma) ||
4295          Parser.getTok().is(AsmToken::Minus)) {
4296     if (Parser.getTok().is(AsmToken::Minus)) {
4297       if (!Spacing)
4298         Spacing = 1; // Register range implies a single spaced list.
4299       else if (Spacing == 2) {
4300         Error(Parser.getTok().getLoc(),
4301               "sequential registers in double spaced list");
4302         return MatchOperand_ParseFail;
4303       }
4304       Parser.Lex(); // Eat the minus.
4305       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4306       int EndReg = tryParseRegister();
4307       if (EndReg == -1) {
4308         Error(AfterMinusLoc, "register expected");
4309         return MatchOperand_ParseFail;
4310       }
4311       // Allow Q regs and just interpret them as the two D sub-registers.
4312       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4313         EndReg = getDRegFromQReg(EndReg) + 1;
4314       // If the register is the same as the start reg, there's nothing
4315       // more to do.
4316       if (Reg == EndReg)
4317         continue;
4318       // The register must be in the same register class as the first.
4319       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
4320         Error(AfterMinusLoc, "invalid register in register list");
4321         return MatchOperand_ParseFail;
4322       }
4323       // Ranges must go from low to high.
4324       if (Reg > EndReg) {
4325         Error(AfterMinusLoc, "bad range in register list");
4326         return MatchOperand_ParseFail;
4327       }
4328       // Parse the lane specifier if present.
4329       VectorLaneTy NextLaneKind;
4330       unsigned NextLaneIndex;
4331       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4332           MatchOperand_Success)
4333         return MatchOperand_ParseFail;
4334       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4335         Error(AfterMinusLoc, "mismatched lane index in register list");
4336         return MatchOperand_ParseFail;
4337       }
4338 
4339       // Add all the registers in the range to the register list.
4340       Count += EndReg - Reg;
4341       Reg = EndReg;
4342       continue;
4343     }
4344     Parser.Lex(); // Eat the comma.
4345     RegLoc = Parser.getTok().getLoc();
4346     int OldReg = Reg;
4347     Reg = tryParseRegister();
4348     if (Reg == -1) {
4349       Error(RegLoc, "register expected");
4350       return MatchOperand_ParseFail;
4351     }
4352     // Vector register lists must be contiguous.
4353     // It's OK to use the enumeration values directly here, as the
4354     // VFP register classes have the enum sorted properly.
4355     //
4356     // The list is of D registers, but we also allow Q regs and just interpret
4357     // them as the two D sub-registers.
4358     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4359       if (!Spacing)
4360         Spacing = 1; // Register range implies a single spaced list.
4361       else if (Spacing == 2) {
4362         Error(RegLoc,
4363               "invalid register in double-spaced list (must be 'D' register)");
4364         return MatchOperand_ParseFail;
4365       }
4366       Reg = getDRegFromQReg(Reg);
4367       if (Reg != OldReg + 1) {
4368         Error(RegLoc, "non-contiguous register range");
4369         return MatchOperand_ParseFail;
4370       }
4371       ++Reg;
4372       Count += 2;
4373       // Parse the lane specifier if present.
4374       VectorLaneTy NextLaneKind;
4375       unsigned NextLaneIndex;
4376       SMLoc LaneLoc = Parser.getTok().getLoc();
4377       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4378           MatchOperand_Success)
4379         return MatchOperand_ParseFail;
4380       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4381         Error(LaneLoc, "mismatched lane index in register list");
4382         return MatchOperand_ParseFail;
4383       }
4384       continue;
4385     }
4386     // Normal D register.
4387     // Figure out the register spacing (single or double) of the list if
4388     // we don't know it already.
4389     if (!Spacing)
4390       Spacing = 1 + (Reg == OldReg + 2);
4391 
4392     // Just check that it's contiguous and keep going.
4393     if (Reg != OldReg + Spacing) {
4394       Error(RegLoc, "non-contiguous register range");
4395       return MatchOperand_ParseFail;
4396     }
4397     ++Count;
4398     // Parse the lane specifier if present.
4399     VectorLaneTy NextLaneKind;
4400     unsigned NextLaneIndex;
4401     SMLoc EndLoc = Parser.getTok().getLoc();
4402     if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4403       return MatchOperand_ParseFail;
4404     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4405       Error(EndLoc, "mismatched lane index in register list");
4406       return MatchOperand_ParseFail;
4407     }
4408   }
4409 
4410   if (Parser.getTok().isNot(AsmToken::RCurly)) {
4411     Error(Parser.getTok().getLoc(), "'}' expected");
4412     return MatchOperand_ParseFail;
4413   }
4414   E = Parser.getTok().getEndLoc();
4415   Parser.Lex(); // Eat '}' token.
4416 
4417   switch (LaneKind) {
4418   case NoLanes:
4419     // Two-register operands are represented using the composite
4420     // DPair/DPairSpc register classes, so convert here.
4421     if (Count == 2) {
4422       const MCRegisterClass *RC = (Spacing == 1) ?
4423         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4424         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4425       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4426     }
4427     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
4428                                                     (Spacing == 2), S, E));
4429     break;
4430   case AllLanes:
4431     // Two-register operands are represented using the composite
4432     // DPair/DPairSpc register classes, so convert here.
4433     if (Count == 2) {
4434       const MCRegisterClass *RC = (Spacing == 1) ?
4435         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4436         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4437       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4438     }
4439     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
4440                                                             (Spacing == 2),
4441                                                             S, E));
4442     break;
4443   case IndexedLane:
4444     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4445                                                            LaneIndex,
4446                                                            (Spacing == 2),
4447                                                            S, E));
4448     break;
4449   }
4450   return MatchOperand_Success;
4451 }
4452 
4453 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4454 OperandMatchResultTy
4455 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4456   MCAsmParser &Parser = getParser();
4457   SMLoc S = Parser.getTok().getLoc();
4458   const AsmToken &Tok = Parser.getTok();
4459   unsigned Opt;
4460 
4461   if (Tok.is(AsmToken::Identifier)) {
4462     StringRef OptStr = Tok.getString();
4463 
4464     Opt = StringSwitch<unsigned>(OptStr.lower())
4465       .Case("sy",    ARM_MB::SY)
4466       .Case("st",    ARM_MB::ST)
4467       .Case("ld",    ARM_MB::LD)
4468       .Case("sh",    ARM_MB::ISH)
4469       .Case("ish",   ARM_MB::ISH)
4470       .Case("shst",  ARM_MB::ISHST)
4471       .Case("ishst", ARM_MB::ISHST)
4472       .Case("ishld", ARM_MB::ISHLD)
4473       .Case("nsh",   ARM_MB::NSH)
4474       .Case("un",    ARM_MB::NSH)
4475       .Case("nshst", ARM_MB::NSHST)
4476       .Case("nshld", ARM_MB::NSHLD)
4477       .Case("unst",  ARM_MB::NSHST)
4478       .Case("osh",   ARM_MB::OSH)
4479       .Case("oshst", ARM_MB::OSHST)
4480       .Case("oshld", ARM_MB::OSHLD)
4481       .Default(~0U);
4482 
4483     // ishld, oshld, nshld and ld are only available from ARMv8.
4484     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4485                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4486       Opt = ~0U;
4487 
4488     if (Opt == ~0U)
4489       return MatchOperand_NoMatch;
4490 
4491     Parser.Lex(); // Eat identifier token.
4492   } else if (Tok.is(AsmToken::Hash) ||
4493              Tok.is(AsmToken::Dollar) ||
4494              Tok.is(AsmToken::Integer)) {
4495     if (Parser.getTok().isNot(AsmToken::Integer))
4496       Parser.Lex(); // Eat '#' or '$'.
4497     SMLoc Loc = Parser.getTok().getLoc();
4498 
4499     const MCExpr *MemBarrierID;
4500     if (getParser().parseExpression(MemBarrierID)) {
4501       Error(Loc, "illegal expression");
4502       return MatchOperand_ParseFail;
4503     }
4504 
4505     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4506     if (!CE) {
4507       Error(Loc, "constant expression expected");
4508       return MatchOperand_ParseFail;
4509     }
4510 
4511     int Val = CE->getValue();
4512     if (Val & ~0xf) {
4513       Error(Loc, "immediate value out of range");
4514       return MatchOperand_ParseFail;
4515     }
4516 
4517     Opt = ARM_MB::RESERVED_0 + Val;
4518   } else
4519     return MatchOperand_ParseFail;
4520 
4521   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4522   return MatchOperand_Success;
4523 }
4524 
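/// parseTraceSyncBarrierOptOperand - Try to parse the 'csync' option for the
/// TSB trace synchronization barrier instruction.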
4525 OperandMatchResultTy
4526 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4527   MCAsmParser &Parser = getParser();
4528   SMLoc S = Parser.getTok().getLoc();
4529   const AsmToken &Tok = Parser.getTok();
4530 
4531   if (Tok.isNot(AsmToken::Identifier))
4532      return MatchOperand_NoMatch;
4533 
4534   if (!Tok.getString().equals_lower("csync"))
4535     return MatchOperand_NoMatch;
4536 
4537   Parser.Lex(); // Eat identifier token.
4538 
4539   Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4540   return MatchOperand_Success;
4541 }
4542 
4543 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4544 OperandMatchResultTy
4545 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4546   MCAsmParser &Parser = getParser();
4547   SMLoc S = Parser.getTok().getLoc();
4548   const AsmToken &Tok = Parser.getTok();
4549   unsigned Opt;
4550 
4551   if (Tok.is(AsmToken::Identifier)) {
4552     StringRef OptStr = Tok.getString();
4553 
4554     if (OptStr.equals_lower("sy"))
4555       Opt = ARM_ISB::SY;
4556     else
4557       return MatchOperand_NoMatch;
4558 
4559     Parser.Lex(); // Eat identifier token.
4560   } else if (Tok.is(AsmToken::Hash) ||
4561              Tok.is(AsmToken::Dollar) ||
4562              Tok.is(AsmToken::Integer)) {
4563     if (Parser.getTok().isNot(AsmToken::Integer))
4564       Parser.Lex(); // Eat '#' or '$'.
4565     SMLoc Loc = Parser.getTok().getLoc();
4566 
4567     const MCExpr *ISBarrierID;
4568     if (getParser().parseExpression(ISBarrierID)) {
4569       Error(Loc, "illegal expression");
4570       return MatchOperand_ParseFail;
4571     }
4572 
4573     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4574     if (!CE) {
4575       Error(Loc, "constant expression expected");
4576       return MatchOperand_ParseFail;
4577     }
4578 
4579     int Val = CE->getValue();
4580     if (Val & ~0xf) {
4581       Error(Loc, "immediate value out of range");
4582       return MatchOperand_ParseFail;
4583     }
4584 
4585     Opt = ARM_ISB::RESERVED_0 + Val;
4586   } else
4587     return MatchOperand_ParseFail;
4588 
4589   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4590           (ARM_ISB::InstSyncBOpt)Opt, S));
4591   return MatchOperand_Success;
4592 }
4593 
4594 
4595 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4596 OperandMatchResultTy
4597 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4598   MCAsmParser &Parser = getParser();
4599   SMLoc S = Parser.getTok().getLoc();
4600   const AsmToken &Tok = Parser.getTok();
4601   if (!Tok.is(AsmToken::Identifier))
4602     return MatchOperand_NoMatch;
4603   StringRef IFlagsStr = Tok.getString();
4604 
4605   // An iflags string of "none" is interpreted to mean that none of the AIF
4606   // bits are set.  Not a terribly useful instruction, but a valid encoding.
4607   unsigned IFlags = 0;
4608   if (IFlagsStr != "none") {
4609     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4610       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4611         .Case("a", ARM_PROC::A)
4612         .Case("i", ARM_PROC::I)
4613         .Case("f", ARM_PROC::F)
4614         .Default(~0U);
4615 
4616       // If some specific iflag is already set, it means that some letter is
4617       // present more than once; this is not acceptable.
4618       if (Flag == ~0U || (IFlags & Flag))
4619         return MatchOperand_NoMatch;
4620 
4621       IFlags |= Flag;
4622     }
4623   }
4624 
4625   Parser.Lex(); // Eat identifier token.
4626   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4627   return MatchOperand_Success;
4628 }
4629 
4630 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
4631 OperandMatchResultTy
4632 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4633   MCAsmParser &Parser = getParser();
4634   SMLoc S = Parser.getTok().getLoc();
4635   const AsmToken &Tok = Parser.getTok();
4636 
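  // An integer operand in the range [0, 255] is accepted directly as the
  // encoded special-register value.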
4637   if (Tok.is(AsmToken::Integer)) {
4638     int64_t Val = Tok.getIntVal();
4639     if (Val > 255 || Val < 0) {
4640       return MatchOperand_NoMatch;
4641     }
4642     unsigned SYSmvalue = Val & 0xFF;
4643     Parser.Lex();
4644     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4645     return MatchOperand_Success;
4646   }
4647 
4648   if (!Tok.is(AsmToken::Identifier))
4649     return MatchOperand_NoMatch;
4650   StringRef Mask = Tok.getString();
4651 
4652   if (isMClass()) {
4653     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
4654     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
4655       return MatchOperand_NoMatch;
4656 
4657     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
4658 
4659     Parser.Lex(); // Eat identifier token.
4660     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4661     return MatchOperand_Success;
4662   }
4663 
4664   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4665   size_t Start = 0, Next = Mask.find('_');
4666   StringRef Flags = "";
4667   std::string SpecReg = Mask.slice(Start, Next).lower();
4668   if (Next != StringRef::npos)
4669     Flags = Mask.slice(Next+1, Mask.size());
4670 
4671   // FlagsVal contains the complete mask:
4672   // 3-0: Mask
4673   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4674   unsigned FlagsVal = 0;
4675 
4676   if (SpecReg == "apsr") {
4677     FlagsVal = StringSwitch<unsigned>(Flags)
4678     .Case("nzcvq",  0x8) // same as CPSR_f
4679     .Case("g",      0x4) // same as CPSR_s
4680     .Case("nzcvqg", 0xc) // same as CPSR_fs
4681     .Default(~0U);
4682 
4683     if (FlagsVal == ~0U) {
4684       if (!Flags.empty())
4685         return MatchOperand_NoMatch;
4686       else
4687         FlagsVal = 8; // No flag
4688     }
4689   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4690     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4691     if (Flags == "all" || Flags == "")
4692       Flags = "fc";
4693     for (int i = 0, e = Flags.size(); i != e; ++i) {
4694       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4695       .Case("c", 1)
4696       .Case("x", 2)
4697       .Case("s", 4)
4698       .Case("f", 8)
4699       .Default(~0U);
4700 
4701       // If some specific flag is already set, it means that some letter is
4702       // present more than once; this is not acceptable.
4703       if (Flag == ~0U || (FlagsVal & Flag))
4704         return MatchOperand_NoMatch;
4705       FlagsVal |= Flag;
4706     }
4707   } else // No match for special register.
4708     return MatchOperand_NoMatch;
4709 
4710   // Special register without flags is NOT equivalent to "fc" flags.
4711   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
4712   // two lines would enable gas compatibility at the expense of breaking
4713   // round-tripping.
4714   //
4715   // if (!FlagsVal)
4716   //  FlagsVal = 0x9;
4717 
4718   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4719   if (SpecReg == "spsr")
4720     FlagsVal |= 16;
4721 
4722   Parser.Lex(); // Eat identifier token.
4723   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4724   return MatchOperand_Success;
4725 }
4726 
4727 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4728 /// use in the MRS/MSR instructions added to support virtualization.
4729 OperandMatchResultTy
4730 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4731   MCAsmParser &Parser = getParser();
4732   SMLoc S = Parser.getTok().getLoc();
4733   const AsmToken &Tok = Parser.getTok();
4734   if (!Tok.is(AsmToken::Identifier))
4735     return MatchOperand_NoMatch;
4736   StringRef RegName = Tok.getString();
4737 
4738   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4739   if (!TheReg)
4740     return MatchOperand_NoMatch;
4741   unsigned Encoding = TheReg->Encoding;
4742 
4743   Parser.Lex(); // Eat identifier token.
4744   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4745   return MatchOperand_Success;
4746 }
4747 
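/// parsePKHImm - Parse an '<Op> #imm' shift operand (e.g. 'lsl #n' for PKHBT
/// or 'asr #n' for PKHTB) and check that the immediate lies in [Low, High].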
4748 OperandMatchResultTy
4749 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4750                           int High) {
4751   MCAsmParser &Parser = getParser();
4752   const AsmToken &Tok = Parser.getTok();
4753   if (Tok.isNot(AsmToken::Identifier)) {
4754     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4755     return MatchOperand_ParseFail;
4756   }
4757   StringRef ShiftName = Tok.getString();
4758   std::string LowerOp = Op.lower();
4759   std::string UpperOp = Op.upper();
4760   if (ShiftName != LowerOp && ShiftName != UpperOp) {
4761     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4762     return MatchOperand_ParseFail;
4763   }
4764   Parser.Lex(); // Eat shift type token.
4765 
4766   // There must be a '#' and a shift amount.
4767   if (Parser.getTok().isNot(AsmToken::Hash) &&
4768       Parser.getTok().isNot(AsmToken::Dollar)) {
4769     Error(Parser.getTok().getLoc(), "'#' expected");
4770     return MatchOperand_ParseFail;
4771   }
4772   Parser.Lex(); // Eat hash token.
4773 
4774   const MCExpr *ShiftAmount;
4775   SMLoc Loc = Parser.getTok().getLoc();
4776   SMLoc EndLoc;
4777   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4778     Error(Loc, "illegal expression");
4779     return MatchOperand_ParseFail;
4780   }
4781   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4782   if (!CE) {
4783     Error(Loc, "constant expression expected");
4784     return MatchOperand_ParseFail;
4785   }
4786   int Val = CE->getValue();
4787   if (Val < Low || Val > High) {
4788     Error(Loc, "immediate value out of range");
4789     return MatchOperand_ParseFail;
4790   }
4791 
4792   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4793 
4794   return MatchOperand_Success;
4795 }
4796 
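/// parseSetEndImm - Parse the 'be' or 'le' operand of the SETEND instruction;
/// 'be' is encoded as 1 and 'le' as 0.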
4797 OperandMatchResultTy
4798 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4799   MCAsmParser &Parser = getParser();
4800   const AsmToken &Tok = Parser.getTok();
4801   SMLoc S = Tok.getLoc();
4802   if (Tok.isNot(AsmToken::Identifier)) {
4803     Error(S, "'be' or 'le' operand expected");
4804     return MatchOperand_ParseFail;
4805   }
4806   int Val = StringSwitch<int>(Tok.getString().lower())
4807     .Case("be", 1)
4808     .Case("le", 0)
4809     .Default(-1);
4810   Parser.Lex(); // Eat the token.
4811 
4812   if (Val == -1) {
4813     Error(S, "'be' or 'le' operand expected");
4814     return MatchOperand_ParseFail;
4815   }
4816   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4817                                                                   getContext()),
4818                                            S, Tok.getEndLoc()));
4819   return MatchOperand_Success;
4820 }
4821 
4822 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4823 /// instructions. Legal values are:
4824 ///     lsl #n  'n' in [0,31]
4825 ///     asr #n  'n' in [1,32]
4826 ///             n == 32 encoded as n == 0.
4827 OperandMatchResultTy
4828 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4829   MCAsmParser &Parser = getParser();
4830   const AsmToken &Tok = Parser.getTok();
4831   SMLoc S = Tok.getLoc();
4832   if (Tok.isNot(AsmToken::Identifier)) {
4833     Error(S, "shift operator 'asr' or 'lsl' expected");
4834     return MatchOperand_ParseFail;
4835   }
4836   StringRef ShiftName = Tok.getString();
4837   bool isASR;
4838   if (ShiftName == "lsl" || ShiftName == "LSL")
4839     isASR = false;
4840   else if (ShiftName == "asr" || ShiftName == "ASR")
4841     isASR = true;
4842   else {
4843     Error(S, "shift operator 'asr' or 'lsl' expected");
4844     return MatchOperand_ParseFail;
4845   }
4846   Parser.Lex(); // Eat the operator.
4847 
4848   // A '#' and a shift amount.
4849   if (Parser.getTok().isNot(AsmToken::Hash) &&
4850       Parser.getTok().isNot(AsmToken::Dollar)) {
4851     Error(Parser.getTok().getLoc(), "'#' expected");
4852     return MatchOperand_ParseFail;
4853   }
4854   Parser.Lex(); // Eat hash token.
4855   SMLoc ExLoc = Parser.getTok().getLoc();
4856 
4857   const MCExpr *ShiftAmount;
4858   SMLoc EndLoc;
4859   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4860     Error(ExLoc, "malformed shift expression");
4861     return MatchOperand_ParseFail;
4862   }
4863   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4864   if (!CE) {
4865     Error(ExLoc, "shift amount must be an immediate");
4866     return MatchOperand_ParseFail;
4867   }
4868 
4869   int64_t Val = CE->getValue();
4870   if (isASR) {
4871     // Shift amount must be in [1,32]
4872     if (Val < 1 || Val > 32) {
4873       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4874       return MatchOperand_ParseFail;
4875     }
4876     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4877     if (isThumb() && Val == 32) {
4878       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4879       return MatchOperand_ParseFail;
4880     }
4881     if (Val == 32) Val = 0;
4882   } else {
4883     // Shift amount must be in [0,31]
4884     if (Val < 0 || Val > 31) {
4885       Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4886       return MatchOperand_ParseFail;
4887     }
4888   }
4889 
4890   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4891 
4892   return MatchOperand_Success;
4893 }
4894 
4895 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4896 /// of instructions. Legal values are:
4897 ///     ror #n  'n' in {0, 8, 16, 24}
4898 OperandMatchResultTy
4899 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4900   MCAsmParser &Parser = getParser();
4901   const AsmToken &Tok = Parser.getTok();
4902   SMLoc S = Tok.getLoc();
4903   if (Tok.isNot(AsmToken::Identifier))
4904     return MatchOperand_NoMatch;
4905   StringRef ShiftName = Tok.getString();
4906   if (ShiftName != "ror" && ShiftName != "ROR")
4907     return MatchOperand_NoMatch;
4908   Parser.Lex(); // Eat the operator.
4909 
4910   // A '#' and a rotate amount.
4911   if (Parser.getTok().isNot(AsmToken::Hash) &&
4912       Parser.getTok().isNot(AsmToken::Dollar)) {
4913     Error(Parser.getTok().getLoc(), "'#' expected");
4914     return MatchOperand_ParseFail;
4915   }
4916   Parser.Lex(); // Eat hash token.
4917   SMLoc ExLoc = Parser.getTok().getLoc();
4918 
4919   const MCExpr *ShiftAmount;
4920   SMLoc EndLoc;
4921   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4922     Error(ExLoc, "malformed rotate expression");
4923     return MatchOperand_ParseFail;
4924   }
4925   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4926   if (!CE) {
4927     Error(ExLoc, "rotate amount must be an immediate");
4928     return MatchOperand_ParseFail;
4929   }
4930 
4931   int64_t Val = CE->getValue();
4932   // The rotate amount must be in {0, 8, 16, 24} (0 is an undocumented
4933   // extension); normally, zero is represented in asm by omitting the rotate
4934   // operand entirely.
4935   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4936     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4937     return MatchOperand_ParseFail;
4938   }
4939 
4940   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4941 
4942   return MatchOperand_Success;
4943 }
4944 
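/// parseModImm - Parse a modified immediate operand: either a plain constant
/// that can be encoded as a rotated 8-bit value, or an explicit '#bits, #rot'
/// pair with #bits in [0, 255] and #rot an even number in [0, 30].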
4945 OperandMatchResultTy
4946 ARMAsmParser::parseModImm(OperandVector &Operands) {
4947   MCAsmParser &Parser = getParser();
4948   MCAsmLexer &Lexer = getLexer();
4949   int64_t Imm1, Imm2;
4950 
4951   SMLoc S = Parser.getTok().getLoc();
4952 
4953   // 1) A mod_imm operand can appear in the place of a register name:
4954   //   add r0, #mod_imm
4955   //   add r0, r0, #mod_imm
4956   // to correctly handle the latter, we bail out as soon as we see an
4957   // identifier.
4958   //
4959   // 2) Similarly, we do not want to parse into complex operands:
4960   //   mov r0, #mod_imm
4961   //   mov r0, :lower16:(_foo)
4962   if (Parser.getTok().is(AsmToken::Identifier) ||
4963       Parser.getTok().is(AsmToken::Colon))
4964     return MatchOperand_NoMatch;
4965 
4966   // Hash (dollar) is optional as per the ARMARM
4967   if (Parser.getTok().is(AsmToken::Hash) ||
4968       Parser.getTok().is(AsmToken::Dollar)) {
4969     // Avoid parsing into complex operands (#:)
4970     if (Lexer.peekTok().is(AsmToken::Colon))
4971       return MatchOperand_NoMatch;
4972 
4973     // Eat the hash (dollar)
4974     Parser.Lex();
4975   }
4976 
4977   SMLoc Sx1, Ex1;
4978   Sx1 = Parser.getTok().getLoc();
4979   const MCExpr *Imm1Exp;
4980   if (getParser().parseExpression(Imm1Exp, Ex1)) {
4981     Error(Sx1, "malformed expression");
4982     return MatchOperand_ParseFail;
4983   }
4984 
4985   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4986 
4987   if (CE) {
4988     // Immediate must fit within 32 bits
4989     Imm1 = CE->getValue();
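    // getSOImmVal returns the 12-bit encoding: the 8-bit value in bits [7:0]
    // and half the rotation amount in bits [11:8]; shifting right by 7 below
    // recovers the full (even) rotation expected by CreateModImm.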
4990     int Enc = ARM_AM::getSOImmVal(Imm1);
4991     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4992       // We have a match!
4993       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4994                                                   (Enc & 0xF00) >> 7,
4995                                                   Sx1, Ex1));
4996       return MatchOperand_Success;
4997     }
4998 
4999     // We have parsed an immediate which is not for us; fall back to a plain
5000     // immediate. This can happen for instruction aliases. For example,
5001     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5002     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5003     // instruction with a mod_imm operand. The alias is defined such that the
5004     // parser method is shared, that's why we have to do this here.
5005     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5006       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5007       return MatchOperand_Success;
5008     }
5009   } else {
5010     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5011     // MCFixup). Fallback to a plain immediate.
5012     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5013     return MatchOperand_Success;
5014   }
5015 
5016   // From this point onward, we expect the input to be a (#bits, #rot) pair
5017   if (Parser.getTok().isNot(AsmToken::Comma)) {
5018     Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
5019     return MatchOperand_ParseFail;
5020   }
5021 
5022   if (Imm1 & ~0xFF) {
5023     Error(Sx1, "immediate operand must be a number in the range [0, 255]");
5024     return MatchOperand_ParseFail;
5025   }
5026 
5027   // Eat the comma
5028   Parser.Lex();
5029 
5030   // Repeat for #rot
5031   SMLoc Sx2, Ex2;
5032   Sx2 = Parser.getTok().getLoc();
5033 
5034   // Eat the optional hash (dollar)
5035   if (Parser.getTok().is(AsmToken::Hash) ||
5036       Parser.getTok().is(AsmToken::Dollar))
5037     Parser.Lex();
5038 
5039   const MCExpr *Imm2Exp;
5040   if (getParser().parseExpression(Imm2Exp, Ex2)) {
5041     Error(Sx2, "malformed expression");
5042     return MatchOperand_ParseFail;
5043   }
5044 
5045   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5046 
5047   if (CE) {
5048     Imm2 = CE->getValue();
5049     if (!(Imm2 & ~0x1E)) {
5050       // We have a match!
5051       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5052       return MatchOperand_Success;
5053     }
5054     Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
5055     return MatchOperand_ParseFail;
5056   } else {
5057     Error(Sx2, "constant expression expected");
5058     return MatchOperand_ParseFail;
5059   }
5060 }
5061 
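/// parseBitfield - Parse the '#lsb, #width' operand pair used by the bitfield
/// instructions (e.g. BFI, BFC); the LSB must be in [0,31] and the width in
/// [1, 32-lsb].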
5062 OperandMatchResultTy
5063 ARMAsmParser::parseBitfield(OperandVector &Operands) {
5064   MCAsmParser &Parser = getParser();
5065   SMLoc S = Parser.getTok().getLoc();
5066   // The bitfield descriptor is really two operands, the LSB and the width.
5067   if (Parser.getTok().isNot(AsmToken::Hash) &&
5068       Parser.getTok().isNot(AsmToken::Dollar)) {
5069     Error(Parser.getTok().getLoc(), "'#' expected");
5070     return MatchOperand_ParseFail;
5071   }
5072   Parser.Lex(); // Eat hash token.
5073 
5074   const MCExpr *LSBExpr;
5075   SMLoc E = Parser.getTok().getLoc();
5076   if (getParser().parseExpression(LSBExpr)) {
5077     Error(E, "malformed immediate expression");
5078     return MatchOperand_ParseFail;
5079   }
5080   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5081   if (!CE) {
5082     Error(E, "'lsb' operand must be an immediate");
5083     return MatchOperand_ParseFail;
5084   }
5085 
5086   int64_t LSB = CE->getValue();
5087   // The LSB must be in the range [0,31]
5088   if (LSB < 0 || LSB > 31) {
5089     Error(E, "'lsb' operand must be in the range [0,31]");
5090     return MatchOperand_ParseFail;
5091   }
5092   E = Parser.getTok().getLoc();
5093 
5094   // Expect another immediate operand.
5095   if (Parser.getTok().isNot(AsmToken::Comma)) {
5096     Error(Parser.getTok().getLoc(), "too few operands");
5097     return MatchOperand_ParseFail;
5098   }
5099   Parser.Lex(); // Eat comma token.
5100   if (Parser.getTok().isNot(AsmToken::Hash) &&
5101       Parser.getTok().isNot(AsmToken::Dollar)) {
5102     Error(Parser.getTok().getLoc(), "'#' expected");
5103     return MatchOperand_ParseFail;
5104   }
5105   Parser.Lex(); // Eat hash token.
5106 
5107   const MCExpr *WidthExpr;
5108   SMLoc EndLoc;
5109   if (getParser().parseExpression(WidthExpr, EndLoc)) {
5110     Error(E, "malformed immediate expression");
5111     return MatchOperand_ParseFail;
5112   }
5113   CE = dyn_cast<MCConstantExpr>(WidthExpr);
5114   if (!CE) {
5115     Error(E, "'width' operand must be an immediate");
5116     return MatchOperand_ParseFail;
5117   }
5118 
5119   int64_t Width = CE->getValue();
5120   // The width must be in the range [1,32-lsb]
5121   if (Width < 1 || Width > 32 - LSB) {
5122     Error(E, "'width' operand must be in the range [1,32-lsb]");
5123     return MatchOperand_ParseFail;
5124   }
5125 
5126   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5127 
5128   return MatchOperand_Success;
5129 }
5130 
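/// parsePostIdxReg - Parse a post-indexed register operand, optionally
/// preceded by '+' or '-' and optionally followed by a shift specifier.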
5131 OperandMatchResultTy
5132 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5133   // Check for a post-index addressing register operand. Specifically:
5134   // postidx_reg := '+' register {, shift}
5135   //              | '-' register {, shift}
5136   //              | register {, shift}
5137 
5138   // This method must return MatchOperand_NoMatch without consuming any tokens
5139   // in the case where there is no match, as other alternatives take other
5140   // parse methods.
5141   MCAsmParser &Parser = getParser();
5142   AsmToken Tok = Parser.getTok();
5143   SMLoc S = Tok.getLoc();
5144   bool haveEaten = false;
5145   bool isAdd = true;
5146   if (Tok.is(AsmToken::Plus)) {
5147     Parser.Lex(); // Eat the '+' token.
5148     haveEaten = true;
5149   } else if (Tok.is(AsmToken::Minus)) {
5150     Parser.Lex(); // Eat the '-' token.
5151     isAdd = false;
5152     haveEaten = true;
5153   }
5154 
5155   SMLoc E = Parser.getTok().getEndLoc();
5156   int Reg = tryParseRegister();
5157   if (Reg == -1) {
5158     if (!haveEaten)
5159       return MatchOperand_NoMatch;
5160     Error(Parser.getTok().getLoc(), "register expected");
5161     return MatchOperand_ParseFail;
5162   }
5163 
5164   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5165   unsigned ShiftImm = 0;
5166   if (Parser.getTok().is(AsmToken::Comma)) {
5167     Parser.Lex(); // Eat the ','.
5168     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5169       return MatchOperand_ParseFail;
5170 
5171     // FIXME: Only approximates end...may include intervening whitespace.
5172     E = Parser.getTok().getLoc();
5173   }
5174 
5175   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5176                                                   ShiftImm, S, E));
5177 
5178   return MatchOperand_Success;
5179 }
5180 
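/// parseAM3Offset - Parse an addrmode3 offset: an immediate (where '#-0' is
/// encoded specially) or a register optionally preceded by '+' or '-'.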
5181 OperandMatchResultTy
5182 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5183   // Check for a post-index addressing register operand. Specifically:
5184   // am3offset := '+' register
5185   //              | '-' register
5186   //              | register
5187   //              | # imm
5188   //              | # + imm
5189   //              | # - imm
5190 
5191   // This method must return MatchOperand_NoMatch without consuming any tokens
5192   // in the case where there is no match, as other alternatives take other
5193   // parse methods.
5194   MCAsmParser &Parser = getParser();
5195   AsmToken Tok = Parser.getTok();
5196   SMLoc S = Tok.getLoc();
5197 
5198   // Do immediates first, as we always parse those if we have a '#'.
5199   if (Parser.getTok().is(AsmToken::Hash) ||
5200       Parser.getTok().is(AsmToken::Dollar)) {
5201     Parser.Lex(); // Eat '#' or '$'.
5202     // Explicitly look for a '-', as we need to encode negative zero
5203     // differently.
5204     bool isNegative = Parser.getTok().is(AsmToken::Minus);
5205     const MCExpr *Offset;
5206     SMLoc E;
5207     if (getParser().parseExpression(Offset, E))
5208       return MatchOperand_ParseFail;
5209     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5210     if (!CE) {
5211       Error(S, "constant expression expected");
5212       return MatchOperand_ParseFail;
5213     }
5214     // Negative zero is encoded as the flag value
5215     // std::numeric_limits<int32_t>::min().
5216     int32_t Val = CE->getValue();
5217     if (isNegative && Val == 0)
5218       Val = std::numeric_limits<int32_t>::min();
5219 
5220     Operands.push_back(
5221       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5222 
5223     return MatchOperand_Success;
5224   }
5225 
5226   bool haveEaten = false;
5227   bool isAdd = true;
5228   if (Tok.is(AsmToken::Plus)) {
5229     Parser.Lex(); // Eat the '+' token.
5230     haveEaten = true;
5231   } else if (Tok.is(AsmToken::Minus)) {
5232     Parser.Lex(); // Eat the '-' token.
5233     isAdd = false;
5234     haveEaten = true;
5235   }
5236 
5237   Tok = Parser.getTok();
5238   int Reg = tryParseRegister();
5239   if (Reg == -1) {
5240     if (!haveEaten)
5241       return MatchOperand_NoMatch;
5242     Error(Tok.getLoc(), "register expected");
5243     return MatchOperand_ParseFail;
5244   }
5245 
5246   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5247                                                   0, S, Tok.getEndLoc()));
5248 
5249   return MatchOperand_Success;
5250 }
5251 
5252 /// Convert parsed operands to MCInst.  Needed here because this instruction
5253 /// only has two register operands, but multiplication is commutative so
5254 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
5255 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5256                                     const OperandVector &Operands) {
5257   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5258   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5259   // If we have a three-operand form, make sure to set Rn to be the operand
5260   // that isn't the same as Rd.
5261   unsigned RegOp = 4;
5262   if (Operands.size() == 6 &&
5263       ((ARMOperand &)*Operands[4]).getReg() ==
5264           ((ARMOperand &)*Operands[3]).getReg())
5265     RegOp = 5;
5266   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
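  // The 16-bit encoding ties the destination register to one of the sources,
  // so re-add the destination (operand 0) as the remaining source register.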
5267   Inst.addOperand(Inst.getOperand(0));
5268   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5269 }
5270 
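/// cvtThumbBranches - Select the appropriate Thumb branch opcode from the
/// parsed operands: conditional branches inside an IT block are rewritten to
/// the unconditional form, the opcode is otherwise chosen from the condition
/// code, and the 16-bit encoding is widened to 32 bits when the target is out
/// of range and the wide encoding is available.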
5271 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5272                                     const OperandVector &Operands) {
5273   int CondOp = -1, ImmOp = -1;
5274   switch(Inst.getOpcode()) {
5275     case ARM::tB:
5276     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
5277 
5278     case ARM::t2B:
5279     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5280 
5281     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5282   }
5283   // First decide whether or not the branch should be conditional
5284   // by looking at its location relative to an IT block.
5285   if(inITBlock()) {
5286     // Inside an IT block we cannot have any conditional branches. Any
5287     // such instruction needs to be converted to the unconditional form.
5288     switch(Inst.getOpcode()) {
5289       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5290       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5291     }
5292   } else {
5293     // Outside IT blocks we can only have unconditional branches with AL
5294     // condition code or conditional branches with non-AL condition code.
5295     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5296     switch(Inst.getOpcode()) {
5297       case ARM::tB:
5298       case ARM::tBcc:
5299         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5300         break;
5301       case ARM::t2B:
5302       case ARM::t2Bcc:
5303         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5304         break;
5305     }
5306   }
5307 
5308   // Now decide on the encoding size based on the branch target range.
5309   switch(Inst.getOpcode()) {
5310     // classify tB as either t2B or t1B based on range of immediate operand
5311     case ARM::tB: {
5312       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5313       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5314         Inst.setOpcode(ARM::t2B);
5315       break;
5316     }
5317     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5318     case ARM::tBcc: {
5319       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5320       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5321         Inst.setOpcode(ARM::t2Bcc);
5322       break;
5323     }
5324   }
5325   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5326   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5327 }
5328 
5329 /// Parse an ARM memory expression; return false on success, or true (after
5330 /// emitting an error) on failure.  The first token must be a '[' when called.
5331 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5332   MCAsmParser &Parser = getParser();
5333   SMLoc S, E;
5334   if (Parser.getTok().isNot(AsmToken::LBrac))
5335     return TokError("Token is not a Left Bracket");
5336   S = Parser.getTok().getLoc();
5337   Parser.Lex(); // Eat left bracket token.
5338 
5339   const AsmToken &BaseRegTok = Parser.getTok();
5340   int BaseRegNum = tryParseRegister();
5341   if (BaseRegNum == -1)
5342     return Error(BaseRegTok.getLoc(), "register expected");
5343 
5344   // The next token must either be a comma, a colon or a closing bracket.
5345   const AsmToken &Tok = Parser.getTok();
5346   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5347       !Tok.is(AsmToken::RBrac))
5348     return Error(Tok.getLoc(), "malformed memory operand");
5349 
5350   if (Tok.is(AsmToken::RBrac)) {
5351     E = Tok.getEndLoc();
5352     Parser.Lex(); // Eat right bracket token.
5353 
5354     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5355                                              ARM_AM::no_shift, 0, 0, false,
5356                                              S, E));
5357 
5358     // If there's a pre-indexing writeback marker, '!', just add it as a token
5359     // operand. It's rather odd, but syntactically valid.
5360     if (Parser.getTok().is(AsmToken::Exclaim)) {
5361       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5362       Parser.Lex(); // Eat the '!'.
5363     }
5364 
5365     return false;
5366   }
5367 
5368   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5369          "Lost colon or comma in memory operand?!");
5370   if (Tok.is(AsmToken::Comma)) {
5371     Parser.Lex(); // Eat the comma.
5372   }
5373 
5374   // If we have a ':', it's an alignment specifier.
5375   if (Parser.getTok().is(AsmToken::Colon)) {
5376     Parser.Lex(); // Eat the ':'.
5377     E = Parser.getTok().getLoc();
5378     SMLoc AlignmentLoc = Tok.getLoc();
5379 
5380     const MCExpr *Expr;
5381     if (getParser().parseExpression(Expr))
5382      return true;
5383 
5384     // The expression has to be a constant. Memory references with relocations
5385     // don't come through here, as they use the <label> forms of the relevant
5386     // instructions.
5387     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5388     if (!CE)
5389       return Error (E, "constant expression expected");
5390 
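    // The alignment specifier is given in bits; convert it to the byte
    // alignment used by the memory operand.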
5391     unsigned Align = 0;
5392     switch (CE->getValue()) {
5393     default:
5394       return Error(E,
5395                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5396     case 16:  Align = 2; break;
5397     case 32:  Align = 4; break;
5398     case 64:  Align = 8; break;
5399     case 128: Align = 16; break;
5400     case 256: Align = 32; break;
5401     }
5402 
5403     // Now we should have the closing ']'
5404     if (Parser.getTok().isNot(AsmToken::RBrac))
5405       return Error(Parser.getTok().getLoc(), "']' expected");
5406     E = Parser.getTok().getEndLoc();
5407     Parser.Lex(); // Eat right bracket token.
5408 
5409     // Don't worry about range checking the value here. That's handled by
5410     // the is*() predicates.
5411     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5412                                              ARM_AM::no_shift, 0, Align,
5413                                              false, S, E, AlignmentLoc));
5414 
5415     // If there's a pre-indexing writeback marker, '!', just add it as a token
5416     // operand.
5417     if (Parser.getTok().is(AsmToken::Exclaim)) {
5418       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5419       Parser.Lex(); // Eat the '!'.
5420     }
5421 
5422     return false;
5423   }
5424 
5425   // If we have a '#', it's an immediate offset, else assume it's a register
5426   // offset. Be friendly and also accept a plain integer (without a leading
5427   // hash) for gas compatibility.
5428   if (Parser.getTok().is(AsmToken::Hash) ||
5429       Parser.getTok().is(AsmToken::Dollar) ||
5430       Parser.getTok().is(AsmToken::Integer)) {
5431     if (Parser.getTok().isNot(AsmToken::Integer))
5432       Parser.Lex(); // Eat '#' or '$'.
5433     E = Parser.getTok().getLoc();
5434 
5435     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5436     const MCExpr *Offset;
5437     if (getParser().parseExpression(Offset))
5438      return true;
5439 
5440     // The expression has to be a constant. Memory references with relocations
5441     // don't come through here, as they use the <label> forms of the relevant
5442     // instructions.
5443     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5444     if (!CE)
5445       return Error (E, "constant expression expected");
5446 
5447     // If the constant was #-0, represent it as
5448     // std::numeric_limits<int32_t>::min().
5449     int32_t Val = CE->getValue();
5450     if (isNegative && Val == 0)
5451       CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5452                                   getContext());
5453 
5454     // Now we should have the closing ']'
5455     if (Parser.getTok().isNot(AsmToken::RBrac))
5456       return Error(Parser.getTok().getLoc(), "']' expected");
5457     E = Parser.getTok().getEndLoc();
5458     Parser.Lex(); // Eat right bracket token.
5459 
5460     // Don't worry about range checking the value here. That's handled by
5461     // the is*() predicates.
5462     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
5463                                              ARM_AM::no_shift, 0, 0,
5464                                              false, S, E));
5465 
5466     // If there's a pre-indexing writeback marker, '!', just add it as a token
5467     // operand.
5468     if (Parser.getTok().is(AsmToken::Exclaim)) {
5469       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5470       Parser.Lex(); // Eat the '!'.
5471     }
5472 
5473     return false;
5474   }
5475 
5476   // The register offset is optionally preceded by a '+' or '-'
5477   bool isNegative = false;
5478   if (Parser.getTok().is(AsmToken::Minus)) {
5479     isNegative = true;
5480     Parser.Lex(); // Eat the '-'.
5481   } else if (Parser.getTok().is(AsmToken::Plus)) {
5482     // Nothing to do.
5483     Parser.Lex(); // Eat the '+'.
5484   }
5485 
5486   E = Parser.getTok().getLoc();
5487   int OffsetRegNum = tryParseRegister();
5488   if (OffsetRegNum == -1)
5489     return Error(E, "register expected");
5490 
5491   // If there's a shift operator, handle it.
5492   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5493   unsigned ShiftImm = 0;
5494   if (Parser.getTok().is(AsmToken::Comma)) {
5495     Parser.Lex(); // Eat the ','.
5496     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5497       return true;
5498   }
5499 
5500   // Now we should have the closing ']'
5501   if (Parser.getTok().isNot(AsmToken::RBrac))
5502     return Error(Parser.getTok().getLoc(), "']' expected");
5503   E = Parser.getTok().getEndLoc();
5504   Parser.Lex(); // Eat right bracket token.
5505 
5506   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5507                                            ShiftType, ShiftImm, 0, isNegative,
5508                                            S, E));
5509 
5510   // If there's a pre-indexing writeback marker, '!', just add it as a token
5511   // operand.
5512   if (Parser.getTok().is(AsmToken::Exclaim)) {
5513     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5514     Parser.Lex(); // Eat the '!'.
5515   }
5516 
5517   return false;
5518 }
5519 
5520 /// parseMemRegOffsetShift - one of these two:
5521 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5522 ///   rrx
5523 /// Returns false if a shift was successfully parsed, true on error.
5524 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5525                                           unsigned &Amount) {
5526   MCAsmParser &Parser = getParser();
5527   SMLoc Loc = Parser.getTok().getLoc();
5528   const AsmToken &Tok = Parser.getTok();
5529   if (Tok.isNot(AsmToken::Identifier))
5530     return Error(Loc, "illegal shift operator");
5531   StringRef ShiftName = Tok.getString();
5532   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5533       ShiftName == "asl" || ShiftName == "ASL")
5534     St = ARM_AM::lsl;
5535   else if (ShiftName == "lsr" || ShiftName == "LSR")
5536     St = ARM_AM::lsr;
5537   else if (ShiftName == "asr" || ShiftName == "ASR")
5538     St = ARM_AM::asr;
5539   else if (ShiftName == "ror" || ShiftName == "ROR")
5540     St = ARM_AM::ror;
5541   else if (ShiftName == "rrx" || ShiftName == "RRX")
5542     St = ARM_AM::rrx;
5543   else
5544     return Error(Loc, "illegal shift operator");
5545   Parser.Lex(); // Eat shift type token.
5546 
5547   // rrx stands alone.
5548   Amount = 0;
5549   if (St != ARM_AM::rrx) {
5550     Loc = Parser.getTok().getLoc();
5551     // A '#' and a shift amount.
5552     const AsmToken &HashTok = Parser.getTok();
5553     if (HashTok.isNot(AsmToken::Hash) &&
5554         HashTok.isNot(AsmToken::Dollar))
5555       return Error(HashTok.getLoc(), "'#' expected");
5556     Parser.Lex(); // Eat hash token.
5557 
5558     const MCExpr *Expr;
5559     if (getParser().parseExpression(Expr))
5560       return true;
5561     // Range check the immediate.
5562     // lsl, ror: 0 <= imm <= 31
5563     // lsr, asr: 0 <= imm <= 32
5564     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5565     if (!CE)
5566       return Error(Loc, "shift amount must be an immediate");
5567     int64_t Imm = CE->getValue();
5568     if (Imm < 0 ||
5569         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5570         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5571       return Error(Loc, "immediate shift value out of range");
5572     // If <ShiftTy> #0, canonicalize it to lsl #0, which is no shift at all.
5573     if (Imm == 0)
5574       St = ARM_AM::lsl;
5575     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5576     if (Imm == 32)
5577       Imm = 0;
5578     Amount = Imm;
5579   }
5580 
5581   return false;
5582 }
5583 
5584 /// parseFPImm - A floating point immediate expression operand.
5585 OperandMatchResultTy
5586 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5587   MCAsmParser &Parser = getParser();
5588   // Anything that can accept a floating point constant as an operand
5589   // needs to go through here, as the regular parseExpression is
5590   // integer only.
5591   //
5592   // This routine still creates a generic Immediate operand, containing
5593   // a bitcast of the 64-bit floating point value. The various operands
5594   // that accept floats can check whether the value is valid for them
5595   // via the standard is*() predicates.
5596 
5597   SMLoc S = Parser.getTok().getLoc();
5598 
5599   if (Parser.getTok().isNot(AsmToken::Hash) &&
5600       Parser.getTok().isNot(AsmToken::Dollar))
5601     return MatchOperand_NoMatch;
5602 
5603   // Disambiguate the VMOV forms that can accept an FP immediate.
5604   // vmov.f32 <sreg>, #imm
5605   // vmov.f64 <dreg>, #imm
5606   // vmov.f32 <dreg>, #imm  @ vector f32x2
5607   // vmov.f32 <qreg>, #imm  @ vector f32x4
5608   //
5609   // There are also the NEON VMOV instructions which expect an
5610   // integer constant. Make sure we don't try to parse an FPImm
5611   // for these:
5612   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5613   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5614   bool isVmovf = TyOp.isToken() &&
5615                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5616                   TyOp.getToken() == ".f16");
5617   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5618   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5619                                          Mnemonic.getToken() == "fconsts");
5620   if (!(isVmovf || isFconst))
5621     return MatchOperand_NoMatch;
5622 
5623   Parser.Lex(); // Eat '#' or '$'.
5624 
5625   // Handle negation, as that still comes through as a separate token.
5626   bool isNegative = false;
5627   if (Parser.getTok().is(AsmToken::Minus)) {
5628     isNegative = true;
5629     Parser.Lex();
5630   }
5631   const AsmToken &Tok = Parser.getTok();
5632   SMLoc Loc = Tok.getLoc();
5633   if (Tok.is(AsmToken::Real) && isVmovf) {
5634     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5635     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5636     // If we had a '-' in front, toggle the sign bit.
5637     IntVal ^= (uint64_t)isNegative << 31;
5638     Parser.Lex(); // Eat the token.
5639     Operands.push_back(ARMOperand::CreateImm(
5640           MCConstantExpr::create(IntVal, getContext()),
5641           S, Parser.getTok().getLoc()));
5642     return MatchOperand_Success;
5643   }
5644   // Also handle plain integers. Instructions which allow floating point
5645   // immediates also allow a raw encoded 8-bit value.
5646   if (Tok.is(AsmToken::Integer) && isFconst) {
5647     int64_t Val = Tok.getIntVal();
5648     Parser.Lex(); // Eat the token.
5649     if (Val > 255 || Val < 0) {
5650       Error(Loc, "encoded floating point value out of range");
5651       return MatchOperand_ParseFail;
5652     }
5653     float RealVal = ARM_AM::getFPImmFloat(Val);
5654     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5655 
5656     Operands.push_back(ARMOperand::CreateImm(
5657         MCConstantExpr::create(Val, getContext()), S,
5658         Parser.getTok().getLoc()));
5659     return MatchOperand_Success;
5660   }
5661 
5662   Error(Loc, "invalid floating point immediate");
5663   return MatchOperand_ParseFail;
5664 }
5665 
5666 /// Parse an ARM instruction operand.  For now this parses the operand
5667 /// regardless of the mnemonic.
5668 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5669   MCAsmParser &Parser = getParser();
5670   SMLoc S, E;
5671 
5672   // Check if the current operand has a custom associated parser, if so, try to
5673   // custom parse the operand, or fallback to the general approach.
5674   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5675   if (ResTy == MatchOperand_Success)
5676     return false;
5677   // If there wasn't a custom match, try the generic matcher below. Otherwise,
5678   // there was a match, but an error occurred, in which case, just return that
5679   // the operand parsing failed.
5680   if (ResTy == MatchOperand_ParseFail)
5681     return true;
5682 
5683   switch (getLexer().getKind()) {
5684   default:
5685     Error(Parser.getTok().getLoc(), "unexpected token in operand");
5686     return true;
5687   case AsmToken::Identifier: {
5688     // If we've seen a branch mnemonic, the next operand must be a label.  This
5689     // is true even if the label is a register name.  So "b r1" means branch to
5690     // label "r1".
5691     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5692     if (!ExpectLabel) {
5693       if (!tryParseRegisterWithWriteBack(Operands))
5694         return false;
5695       int Res = tryParseShiftRegister(Operands);
5696       if (Res == 0) // success
5697         return false;
5698       else if (Res == -1) // irrecoverable error
5699         return true;
5700       // If this is VMRS, check for the apsr_nzcv operand.
5701       if (Mnemonic == "vmrs" &&
5702           Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5703         S = Parser.getTok().getLoc();
5704         Parser.Lex();
5705         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5706         return false;
5707       }
5708     }
5709 
5710     // Fall through for the Identifier case that is not a register or a
5711     // special name.
5712     LLVM_FALLTHROUGH;
5713   }
5714   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
5715   case AsmToken::Integer: // things like 1f and 2b as branch targets
5716   case AsmToken::String:  // quoted label names.
5717   case AsmToken::Dot: {   // . as a branch target
5718     // This was not a register so parse other operands that start with an
5719     // identifier (like labels) as expressions and create them as immediates.
5720     const MCExpr *IdVal;
5721     S = Parser.getTok().getLoc();
5722     if (getParser().parseExpression(IdVal))
5723       return true;
5724     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5725     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5726     return false;
5727   }
5728   case AsmToken::LBrac:
5729     return parseMemory(Operands);
5730   case AsmToken::LCurly:
5731     return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
5732   case AsmToken::Dollar:
5733   case AsmToken::Hash:
5734     // #42 -> immediate.
5735     S = Parser.getTok().getLoc();
5736     Parser.Lex();
5737 
5738     if (Parser.getTok().isNot(AsmToken::Colon)) {
5739       bool isNegative = Parser.getTok().is(AsmToken::Minus);
5740       const MCExpr *ImmVal;
5741       if (getParser().parseExpression(ImmVal))
5742         return true;
5743       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
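      // A literal like '#-0' parses to the constant 0; the negative sign is
      // remembered by substituting INT32_MIN as a sentinel (presumably so that
      // later matching can still distinguish it from a plain '#0').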
5744       if (CE) {
5745         int32_t Val = CE->getValue();
5746         if (isNegative && Val == 0)
5747           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5748                                           getContext());
5749       }
5750       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5751       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5752 
5753       // There can be a trailing '!' on operands that we want as a separate
5754       // '!' Token operand. Handle that here. For example, the compatibility
5755       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5756       if (Parser.getTok().is(AsmToken::Exclaim)) {
5757         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5758                                                    Parser.getTok().getLoc()));
5759         Parser.Lex(); // Eat exclaim token
5760       }
5761       return false;
5762     }
5763     // w/ a ':' after the '#', it's just like a plain ':'.
5764     LLVM_FALLTHROUGH;
5765 
5766   case AsmToken::Colon: {
5767     S = Parser.getTok().getLoc();
5768     // ":lower16:" and ":upper16:" expression prefixes
5769     // FIXME: Check it's an expression prefix,
5770     // e.g. (FOO - :lower16:BAR) isn't legal.
5771     ARMMCExpr::VariantKind RefKind;
5772     if (parsePrefix(RefKind))
5773       return true;
5774 
5775     const MCExpr *SubExprVal;
5776     if (getParser().parseExpression(SubExprVal))
5777       return true;
5778 
5779     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5780                                               getContext());
5781     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5782     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5783     return false;
5784   }
5785   case AsmToken::Equal: {
5786     S = Parser.getTok().getLoc();
5787     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5788       return Error(S, "unexpected token in operand");
5789     Parser.Lex(); // Eat '='
5790     const MCExpr *SubExprVal;
5791     if (getParser().parseExpression(SubExprVal))
5792       return true;
5793     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5794 
5795     // execute-only: we assume that assembly programmers know what they are
5796     // doing and allow literal pool creation here
5797     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
5798     return false;
5799   }
5800   }
5801 }
5802 
5803 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
5804 //  :lower16: or :upper16:.
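// For example (illustrative), these prefixes show up when materialising a
// 32-bit value in two halves: "movw r0, #:lower16:sym" followed by
// "movt r0, #:upper16:sym" (the '#' is optional, for GNU as compatibility).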
5805 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5806   MCAsmParser &Parser = getParser();
5807   RefKind = ARMMCExpr::VK_ARM_None;
5808 
5809   // consume an optional '#' (GNU compatibility)
5810   if (getLexer().is(AsmToken::Hash))
5811     Parser.Lex();
5812 
5813   // :lower16: and :upper16: modifiers
5814   assert(getLexer().is(AsmToken::Colon) && "expected a :");
5815   Parser.Lex(); // Eat ':'
5816 
5817   if (getLexer().isNot(AsmToken::Identifier)) {
5818     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5819     return true;
5820   }
5821 
5822   enum {
5823     COFF = (1 << MCObjectFileInfo::IsCOFF),
5824     ELF = (1 << MCObjectFileInfo::IsELF),
5825     MACHO = (1 << MCObjectFileInfo::IsMachO),
5826     WASM = (1 << MCObjectFileInfo::IsWasm),
5827   };
5828   static const struct PrefixEntry {
5829     const char *Spelling;
5830     ARMMCExpr::VariantKind VariantKind;
5831     uint8_t SupportedFormats;
5832   } PrefixEntries[] = {
5833     { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5834     { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5835   };
5836 
5837   StringRef IDVal = Parser.getTok().getIdentifier();
5838 
5839   const auto &Prefix =
5840       std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5841                    [&IDVal](const PrefixEntry &PE) {
5842                       return PE.Spelling == IDVal;
5843                    });
5844   if (Prefix == std::end(PrefixEntries)) {
5845     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5846     return true;
5847   }
5848 
5849   uint8_t CurrentFormat;
5850   switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5851   case MCObjectFileInfo::IsMachO:
5852     CurrentFormat = MACHO;
5853     break;
5854   case MCObjectFileInfo::IsELF:
5855     CurrentFormat = ELF;
5856     break;
5857   case MCObjectFileInfo::IsCOFF:
5858     CurrentFormat = COFF;
5859     break;
5860   case MCObjectFileInfo::IsWasm:
5861     CurrentFormat = WASM;
5862     break;
5863   case MCObjectFileInfo::IsXCOFF:
5864     llvm_unreachable("unexpected object format");
5865     break;
5866   }
5867 
5868   if (~Prefix->SupportedFormats & CurrentFormat) {
5869     Error(Parser.getTok().getLoc(),
5870           "cannot represent relocation in the current file format");
5871     return true;
5872   }
5873 
5874   RefKind = Prefix->VariantKind;
5875   Parser.Lex();
5876 
5877   if (getLexer().isNot(AsmToken::Colon)) {
5878     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5879     return true;
5880   }
5881   Parser.Lex(); // Eat the last ':'
5882 
5883   return false;
5884 }
5885 
5886 /// Given a mnemonic, split out possible predication code and carry
5887 /// setting letters to form a canonical mnemonic and flags.
5888 //
5889 // FIXME: Would be nice to autogen this.
5890 // FIXME: This is a bit of a maze of special cases.
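// Worked examples (illustrative): "addseq" splits into Mnemonic="add",
// PredicationCode=EQ, CarrySetting=true; "ittet" splits into Mnemonic="it"
// with ITMask="tet"; and with MVE, "vaddt" would split into Mnemonic="vadd"
// with VPTPredicationCode=Then.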
5891 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5892                                       StringRef ExtraToken,
5893                                       unsigned &PredicationCode,
5894                                       unsigned &VPTPredicationCode,
5895                                       bool &CarrySetting,
5896                                       unsigned &ProcessorIMod,
5897                                       StringRef &ITMask) {
5898   PredicationCode = ARMCC::AL;
5899   VPTPredicationCode = ARMVCC::None;
5900   CarrySetting = false;
5901   ProcessorIMod = 0;
5902 
5903   // Ignore some mnemonics we know aren't predicated forms.
5904   //
5905   // FIXME: Would be nice to autogen this.
5906   if ((Mnemonic == "movs" && isThumb()) ||
5907       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
5908       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
5909       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
5910       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
5911       Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
5912       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
5913       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
5914       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5915       Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5916       Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
5917       Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5918       Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5919       Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5920       Mnemonic == "bxns"  || Mnemonic == "blxns" ||
5921       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5922       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
5923       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
5924       Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
5925       Mnemonic == "csel" || Mnemonic == "csinc" ||
5926       Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
5927       Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
5928       Mnemonic == "csetm")
5929     return Mnemonic;
5930 
5931   // First, split out any predication code. Ignore mnemonics we know aren't
5932   // predicated but do have a carry-setting suffix and so weren't caught above.
5933   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5934       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5935       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5936       Mnemonic != "sbcs" && Mnemonic != "rscs" &&
5937       !(hasMVE() &&
5938         (Mnemonic == "vmine" ||
5939          Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt"))) {
5940     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
5941     if (CC != ~0U) {
5942       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5943       PredicationCode = CC;
5944     }
5945   }
5946 
5947   // Next, determine if we have a carry setting bit. We explicitly ignore all
5948   // the instructions whose mnemonics we know legitimately end in 's'.
5949   if (Mnemonic.endswith("s") &&
5950       !(Mnemonic == "cps" || Mnemonic == "mls" ||
5951         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5952         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5953         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5954         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5955         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5956         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5957         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5958         Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5959         Mnemonic == "bxns" || Mnemonic == "blxns" ||
5960         (Mnemonic == "movs" && isThumb()))) {
5961     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5962     CarrySetting = true;
5963   }
5964 
5965   // The "cps" instruction can have a interrupt mode operand which is glued into
5966   // the mnemonic. Check if this is the case, split it and parse the imod op
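  // For example (illustrative), "cpsie i" arrives here as Mnemonic "cpsie" and
  // is split into "cps" with ProcessorIMod = ARM_PROC::IE.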
5967   if (Mnemonic.startswith("cps")) {
5968     // Split out any imod code.
5969     unsigned IMod =
5970       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5971       .Case("ie", ARM_PROC::IE)
5972       .Case("id", ARM_PROC::ID)
5973       .Default(~0U);
5974     if (IMod != ~0U) {
5975       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5976       ProcessorIMod = IMod;
5977     }
5978   }
5979 
5980   if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
5981       Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
5982       Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
5983       Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt") {
5984     unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
5985     if (CC != ~0U) {
5986       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
5987       VPTPredicationCode = CC;
5988     }
5989     return Mnemonic;
5990   }
5991 
5992   // The "it" instruction has the condition mask on the end of the mnemonic.
5993   if (Mnemonic.startswith("it")) {
5994     ITMask = Mnemonic.slice(2, Mnemonic.size());
5995     Mnemonic = Mnemonic.slice(0, 2);
5996   }
5997 
5998   if (Mnemonic.startswith("vpst")) {
5999     ITMask = Mnemonic.slice(4, Mnemonic.size());
6000     Mnemonic = Mnemonic.slice(0, 4);
6001   }
6002   else if (Mnemonic.startswith("vpt")) {
6003     ITMask = Mnemonic.slice(3, Mnemonic.size());
6004     Mnemonic = Mnemonic.slice(0, 3);
6005   }
6006 
6007   return Mnemonic;
6008 }
6009 
6010 /// Given a canonical mnemonic, determine if the instruction ever allows
6011 /// inclusion of carry set or predication code operands.
6012 //
6013 // FIXME: It would be nice to autogen this.
6014 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6015                                          StringRef ExtraToken,
6016                                          StringRef FullInst,
6017                                          bool &CanAcceptCarrySet,
6018                                          bool &CanAcceptPredicationCode,
6019                                          bool &CanAcceptVPTPredicationCode) {
6020   CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6021 
6022   CanAcceptCarrySet =
6023       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6024       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6025       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6026       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6027       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6028       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6029       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6030       (!isThumb() &&
6031        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6032         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6033 
6034   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6035       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6036       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6037       Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
6038       Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
6039       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6040       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6041       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6042       Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
6043       Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
6044       (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
6045       Mnemonic == "vmovx" || Mnemonic == "vins" ||
6046       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
6047       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6048       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
6049       Mnemonic == "sb"    || Mnemonic == "ssbb"  ||
6050       Mnemonic == "pssbb" ||
6051       Mnemonic == "bfcsel" || Mnemonic == "wls" ||
6052       Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
6053       Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6054       Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6055       Mnemonic == "cset" || Mnemonic == "csetm" ||
6056       Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst")) {
6057     // These mnemonics are never predicable
6058     CanAcceptPredicationCode = false;
6059   } else if (!isThumb()) {
6060     // Some instructions are only predicable in Thumb mode
6061     CanAcceptPredicationCode =
6062         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6063         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6064         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6065         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6066         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6067         Mnemonic != "stc2" && Mnemonic != "stc2l" &&
6068         Mnemonic != "tsb" &&
6069         !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
6070   } else if (isThumbOne()) {
6071     if (hasV6MOps())
6072       CanAcceptPredicationCode = Mnemonic != "movs";
6073     else
6074       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6075   } else
6076     CanAcceptPredicationCode = true;
6077 }
6078 
6079 // Some Thumb instructions have two-operand encodings but no three-operand
6080 // encoding; convert to the two-operand form if possible.
6081 //
6082 // FIXME: We would really like to be able to tablegen'erate this.
6083 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6084                                                  bool CarrySetting,
6085                                                  OperandVector &Operands) {
6086   if (Operands.size() != 6)
6087     return;
6088 
6089   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6090         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6091   if (!Op3.isReg() || !Op4.isReg())
6092     return;
6093 
6094   auto Op3Reg = Op3.getReg();
6095   auto Op4Reg = Op4.getReg();
6096 
6097   // For most Thumb2 cases we just generate the 3 operand form and reduce
6098   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
6099   // won't accept SP or PC so we do the transformation here taking care
6100   // with immediate range in the 'add sp, sp, #imm' case.
6101   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6102   if (isThumbTwo()) {
6103     if (Mnemonic != "add")
6104       return;
6105     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6106                         (Op5.isReg() && Op5.getReg() == ARM::PC);
6107     if (!TryTransform) {
6108       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6109                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6110                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6111                        Op5.isImm() && !Op5.isImm0_508s4());
6112     }
6113     if (!TryTransform)
6114       return;
6115   } else if (!isThumbOne())
6116     return;
6117 
6118   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6119         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6120         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6121         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6122     return;
6123 
6124   // If the first two operands of a three-operand instruction are the same,
6125   // then transform to the two-operand version of the same instruction,
6126   // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'.
6127   bool Transform = Op3Reg == Op4Reg;
6128 
6129   // For commutative operations, we might be able to transform if we swap
6130   // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
6131   // as tADDrsp.
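  // For example (illustrative), "ands r1, r2, r1" can be reduced to the
  // two-operand "ands r1, r2" once Op4 and Op5 are swapped.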
6132   const ARMOperand *LastOp = &Op5;
6133   bool Swap = false;
6134   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6135       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6136        Mnemonic == "and" || Mnemonic == "eor" ||
6137        Mnemonic == "adc" || Mnemonic == "orr")) {
6138     Swap = true;
6139     LastOp = &Op4;
6140     Transform = true;
6141   }
6142 
6143   // If both registers are the same then remove one of them from
6144   // the operand list, with certain exceptions.
6145   if (Transform) {
6146     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6147     // 2 operand forms don't exist.
6148     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6149         LastOp->isReg())
6150       Transform = false;
6151 
6152     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6153     // 3-bits because the ARMARM says not to.
6154     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6155       Transform = false;
6156   }
6157 
6158   if (Transform) {
6159     if (Swap)
6160       std::swap(Op4, Op5);
6161     Operands.erase(Operands.begin() + 3);
6162   }
6163 }
6164 
6165 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6166                                           OperandVector &Operands) {
6167   // FIXME: This is all horribly hacky. We really need a better way to deal
6168   // with optional operands like this in the matcher table.
6169 
6170   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6171   // another does not. Specifically, the MOVW instruction does not. So we
6172   // special case it here and remove the defaulted (non-setting) cc_out
6173   // operand if that's the instruction we're trying to match.
6174   //
6175   // We do this as post-processing of the explicit operands rather than just
6176   // conditionally adding the cc_out in the first place because we need
6177   // to check the type of the parsed immediate operand.
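  // For example (illustrative), in ARM mode "mov r0, #0xabcd" can only be
  // encoded as MOVW (no cc_out), whereas "mov r0, #0xab000000" is a
  // modified-immediate MOV which does have a cc_out operand.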
6178   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6179       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6180       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6181       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6182     return true;
6183 
6184   // Register-register 'add' for thumb does not have a cc_out operand
6185   // when there are only two register operands.
6186   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6187       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6188       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6189       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6190     return true;
6191   // Register-register 'add' for thumb does not have a cc_out operand
6192   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
6193   // have to check the immediate range here since Thumb2 has a variant
6194   // that can handle a different range and has a cc_out operand.
6195   if (((isThumb() && Mnemonic == "add") ||
6196        (isThumbTwo() && Mnemonic == "sub")) &&
6197       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6198       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6199       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6200       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6201       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6202        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6203     return true;
6204   // For Thumb2, add/sub immediate does not have a cc_out operand for the
6205   // imm0_4095 variant. That's the least-preferred variant when
6206   // selecting via the generic "add" mnemonic, so to know that we
6207   // should remove the cc_out operand, we have to explicitly check that
6208   // it's not one of the other variants. Ugh.
6209   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6210       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6211       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6212       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6213     // Nest conditions rather than one big 'if' statement for readability.
6214     //
6215     // If both registers are low, we're in an IT block, and the immediate is
6216     // in range, we should use encoding T1 instead, which has a cc_out.
6217     if (inITBlock() &&
6218         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6219         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6220         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6221       return false;
6222     // Check against T3. If the second register is the PC, this is an
6223     // alternate form of ADR, which uses encoding T4, so check for that too.
6224     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6225         static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
6226       return false;
6227 
6228     // Otherwise, we use encoding T4, which does not have a cc_out
6229     // operand.
6230     return true;
6231   }
6232 
6233   // The thumb2 multiply instruction doesn't have a CCOut register, so
6234   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6235   // use the 16-bit encoding or not.
6236   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6237       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6238       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6239       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6240       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6241       // If the registers aren't low regs, the destination reg isn't the
6242       // same as one of the source regs, or the cc_out operand is zero
6243       // outside of an IT block, we have to use the 32-bit encoding, so
6244       // remove the cc_out operand.
6245       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6246        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6247        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6248        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6249                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6250                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6251                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
6252     return true;
6253 
6254   // Also check the 'mul' syntax variant that doesn't specify an explicit
6255   // destination register.
6256   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6257       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6258       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6259       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6260       // If the registers aren't low regs  or the cc_out operand is zero
6261       // outside of an IT block, we have to use the 32-bit encoding, so
6262       // remove the cc_out operand.
6263       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6264        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6265        !inITBlock()))
6266     return true;
6267 
6268   // Register-register 'add/sub' for thumb does not have a cc_out operand
6269   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6270   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6271   // right, this will result in better diagnostics (which operand is off)
6272   // anyway.
6273   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6274       (Operands.size() == 5 || Operands.size() == 6) &&
6275       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6276       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6277       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6278       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6279        (Operands.size() == 6 &&
6280         static_cast<ARMOperand &>(*Operands[5]).isImm())))
6281     return true;
6282 
6283   return false;
6284 }
6285 
6286 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6287                                               OperandVector &Operands) {
6288   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
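  // For example (illustrative), "vrintz.f32 s0, s1" is the predicable VFP
  // form, while "vrintz.f32 d0, d1" is the NEON form, which is not.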
6289   unsigned RegIdx = 3;
6290   if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
6291       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6292        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6293     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6294         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6295          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6296       RegIdx = 4;
6297 
6298     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6299         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6300              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6301          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6302              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6303       return true;
6304   }
6305   return false;
6306 }
6307 
6308 bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6309                                                     OperandVector &Operands) {
6310   if (!hasMVE() || Operands.size() < 3)
6311     return true;
6312 
6313   for (auto &Operand : Operands) {
6314     // We check the larger class QPR instead of just the legal class
6315     // MQPR, to more accurately report errors when using Q registers
6316     // outside of the allowed range.
6317     if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6318         (Operand->isReg() &&
6319          (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6320              Operand->getReg()))))
6321       return false;
6322   }
6323   return true;
6324 }
6325 
6326 static bool isDataTypeToken(StringRef Tok) {
6327   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6328     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6329     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6330     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6331     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6332     Tok == ".f" || Tok == ".d";
6333 }
6334 
6335 // FIXME: This bit should probably be handled via an explicit match class
6336 // in the .td files that matches the suffix instead of having it be
6337 // a literal string token the way it is now.
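// For example (illustrative), "vldmia.32 r0!, {s0-s3}" is accepted here with
// the ".32" suffix silently dropped before matching.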
6338 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6339   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
6340 }
6341 
6342 static void applyMnemonicAliases(StringRef &Mnemonic,
6343                                  const FeatureBitset &Features,
6344                                  unsigned VariantID);
6345 
6346 // The GNU assembler has aliases of ldrd and strd with the second register
6347 // omitted. We don't have a way to do that in tablegen, so fix it up here.
6348 //
6349 // We have to be careful not to emit an invalid Rt2 here, because the rest of
6350 // the assembly parser could then generate confusing diagnostics referring to
6351 // it. If we do find anything that prevents us from doing the transformation we
6352 // bail out, and let the assembly parser report an error on the instruction as
6353 // it is written.
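// For example (illustrative), "ldrd r0, [r4]" is rewritten here to
// "ldrd r0, r1, [r4]" before matching, while "ldrd r1, [r4]" in ARM mode is
// left untouched (odd Rt) so the matcher diagnoses the instruction as written.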
6354 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6355                                      OperandVector &Operands) {
6356   if (Mnemonic != "ldrd" && Mnemonic != "strd")
6357     return;
6358   if (Operands.size() < 4)
6359     return;
6360 
6361   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6362   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6363 
6364   if (!Op2.isReg())
6365     return;
6366   if (!Op3.isMem())
6367     return;
6368 
6369   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6370   if (!GPR.contains(Op2.getReg()))
6371     return;
6372 
6373   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6374   if (!isThumb() && (RtEncoding & 1)) {
6375     // In ARM mode, the registers must be from an aligned pair; this
6376     // restriction does not apply in Thumb mode.
6377     return;
6378   }
6379   if (Op2.getReg() == ARM::PC)
6380     return;
6381   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6382   if (!PairedReg || PairedReg == ARM::PC ||
6383       (PairedReg == ARM::SP && !hasV8Ops()))
6384     return;
6385 
6386   Operands.insert(
6387       Operands.begin() + 3,
6388       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6389 }
6390 
6391 /// Parse an arm instruction mnemonic followed by its operands.
6392 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6393                                     SMLoc NameLoc, OperandVector &Operands) {
6394   MCAsmParser &Parser = getParser();
6395 
6396   // Apply mnemonic aliases before doing anything else, as the destination
6397   // mnemonic may include suffixes and we want to handle them normally.
6398   // The generic tblgen'erated code does this later, at the start of
6399   // MatchInstructionImpl(), but that's too late for aliases that include
6400   // any sort of suffix.
6401   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6402   unsigned AssemblerDialect = getParser().getAssemblerDialect();
6403   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6404 
6405   // First check for the ARM-specific .req directive.
6406   if (Parser.getTok().is(AsmToken::Identifier) &&
6407       Parser.getTok().getIdentifier() == ".req") {
6408     parseDirectiveReq(Name, NameLoc);
6409     // We always return 'error' for this, as we're done with this
6410     // statement and don't need to match the instruction.
6411     return true;
6412   }
6413 
6414   // Create the leading tokens for the mnemonic, split by '.' characters.
6415   size_t Start = 0, Next = Name.find('.');
6416   StringRef Mnemonic = Name.slice(Start, Next);
6417   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6418 
6419   // Split out the predication code and carry setting flag from the mnemonic.
6420   unsigned PredicationCode;
6421   unsigned VPTPredicationCode;
6422   unsigned ProcessorIMod;
6423   bool CarrySetting;
6424   StringRef ITMask;
6425   Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6426                            CarrySetting, ProcessorIMod, ITMask);
6427 
6428   // In Thumb1, only the branch (B) instruction can be predicated.
6429   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6430     return Error(NameLoc, "conditional execution not supported in Thumb1");
6431   }
6432 
6433   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6434 
6435   // Handle the mask for IT and VPT instructions. In ARMOperand and
6436   // MCOperand, this is stored in a format independent of the
6437   // condition code: the lowest set bit indicates the end of the
6438   // encoding, and above that, a 1 bit indicates 'else', and a 0 bit
6439   // indicates 'then'. E.g.
6440   //    IT    -> 1000
6441   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
6442   //    ITxy  -> xy10    (e.g. ITET -> 1010)
6443   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
6444   if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
6445       Mnemonic.startswith("vpst")) {
6446     SMLoc Loc = Mnemonic == "it"  ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
6447                 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
6448                                     SMLoc::getFromPointer(NameLoc.getPointer() + 4);
6449     if (ITMask.size() > 3) {
6450       if (Mnemonic == "it")
6451         return Error(Loc, "too many conditions on IT instruction");
6452       return Error(Loc, "too many conditions on VPT instruction");
6453     }
6454     unsigned Mask = 8;
6455     for (unsigned i = ITMask.size(); i != 0; --i) {
6456       char pos = ITMask[i - 1];
6457       if (pos != 't' && pos != 'e') {
6458         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
6459       }
6460       Mask >>= 1;
6461       if (ITMask[i - 1] == 'e')
6462         Mask |= 8;
6463     }
6464     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
6465   }
6466 
6467   // FIXME: This is all a pretty gross hack. We should automatically handle
6468   // optional operands like this via tblgen.
6469 
6470   // Next, add the CCOut and ConditionCode operands, if needed.
6471   //
6472   // For mnemonics which can ever incorporate a carry setting bit or predication
6473   // code, our matching model involves us always generating CCOut and
6474   // ConditionCode operands to match the mnemonic "as written" and then we let
6475   // the matcher deal with finding the right instruction or generating an
6476   // appropriate error.
6477   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
6478   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
6479                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
6480 
6481   // If we had a carry-set on an instruction that can't do that, issue an
6482   // error.
6483   if (!CanAcceptCarrySet && CarrySetting) {
6484     return Error(NameLoc, "instruction '" + Mnemonic +
6485                  "' can not set flags, but 's' suffix specified");
6486   }
6487   // If we had a predication code on an instruction that can't do that, issue an
6488   // error.
6489   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
6490     return Error(NameLoc, "instruction '" + Mnemonic +
6491                  "' is not predicable, but condition code specified");
6492   }
6493 
6494   // If we had a VPT predication code on an instruction that can't do that, issue an
6495   // error.
6496   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
6497     return Error(NameLoc, "instruction '" + Mnemonic +
6498                  "' is not VPT predicable, but VPT code T/E is specified");
6499   }
6500 
6501   // Add the carry setting operand, if necessary.
6502   if (CanAcceptCarrySet) {
6503     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
6504     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
6505                                                Loc));
6506   }
6507 
6508   // Add the predication code operand, if necessary.
6509   if (CanAcceptPredicationCode) {
6510     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
6511                                       CarrySetting);
6512     Operands.push_back(ARMOperand::CreateCondCode(
6513                        ARMCC::CondCodes(PredicationCode), Loc));
6514   }
6515 
6516   // Add the VPT predication code operand, if necessary.
6517   // FIXME: We don't add them for the instructions filtered below as these can
6518   // have custom operands which need special parsing.  This parsing requires
6519   // the operand to be in the same place in the OperandVector as their
6520   // definition in tblgen.  Since these instructions may also have the
6521   // scalar predication operand, we do not add the vector one here and
6522   // instead fix it up later, once the operands have been parsed.
6523   if (CanAcceptVPTPredicationCode && Mnemonic != "vmov") {
6524     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
6525                                       CarrySetting);
6526     Operands.push_back(ARMOperand::CreateVPTPred(
6527                          ARMVCC::VPTCodes(VPTPredicationCode), Loc));
6528   }
6529 
6530   // Add the processor imod operand, if necessary.
6531   if (ProcessorIMod) {
6532     Operands.push_back(ARMOperand::CreateImm(
6533           MCConstantExpr::create(ProcessorIMod, getContext()),
6534                                  NameLoc, NameLoc));
6535   } else if (Mnemonic == "cps" && isMClass()) {
6536     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
6537   }
6538 
6539   // Add the remaining tokens in the mnemonic.
6540   while (Next != StringRef::npos) {
6541     Start = Next;
6542     Next = Name.find('.', Start + 1);
6543     ExtraToken = Name.slice(Start, Next);
6544 
6545     // Some NEON instructions have an optional datatype suffix that is
6546     // completely ignored. Check for that.
6547     if (isDataTypeToken(ExtraToken) &&
6548         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
6549       continue;
6550 
6551     // For ARM mode, generate an error if the .n qualifier is used.
6552     if (ExtraToken == ".n" && !isThumb()) {
6553       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6554       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
6555                    "arm mode");
6556     }
6557 
6558     // The .n qualifier is always discarded as that is what the tables
6559     // and matcher expect.  In ARM mode the .w qualifier has no effect,
6560     // so discard it to avoid errors that can be caused by the matcher.
6561     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
6562       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6563       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
6564     }
6565   }
6566 
6567   // Read the remaining operands.
6568   if (getLexer().isNot(AsmToken::EndOfStatement)) {
6569     // Read the first operand.
6570     if (parseOperand(Operands, Mnemonic)) {
6571       return true;
6572     }
6573 
6574     while (parseOptionalToken(AsmToken::Comma)) {
6575       // Parse and remember the operand.
6576       if (parseOperand(Operands, Mnemonic)) {
6577         return true;
6578       }
6579     }
6580   }
6581 
6582   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
6583     return true;
6584 
6585   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
6586 
6587   // Some instructions, mostly Thumb, have forms for the same mnemonic that
6588   // do and don't have a cc_out optional-def operand. With some spot-checks
6589   // of the operand list, we can figure out which variant we're trying to
6590   // parse and adjust accordingly before actually matching. We shouldn't ever
6591   // try to remove a cc_out operand that was explicitly set on the
6592   // mnemonic, of course (CarrySetting == true). Reason #317 why the
6593   // table-driven matcher doesn't fit well with the ARM instruction set.
6594   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
6595     Operands.erase(Operands.begin() + 1);
6596 
6597   // Some instructions have the same mnemonic, but don't always
6598   // have a predicate. Distinguish them here and delete the
6599   // appropriate predicate if needed.  This could be either the scalar
6600   // predication code or the vector predication code.
6601   if (PredicationCode == ARMCC::AL &&
6602       shouldOmitPredicateOperand(Mnemonic, Operands))
6603     Operands.erase(Operands.begin() + 1);
6604 
6606   if (hasMVE()) {
6607     if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
6608         Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
6609       // Very nasty hack to distinguish the vector predicated variant of
6610       // vmovlt from the scalar predicated vmov with condition 'lt'.  We
6611       // cannot tell them apart until we have parsed their operands.
6612       Operands.erase(Operands.begin() + 1);
6613       Operands.erase(Operands.begin());
6614       SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
6615       SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
6616                                          Mnemonic.size() - 1 + CarrySetting);
6617       Operands.insert(Operands.begin(),
6618                       ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
6619       Operands.insert(Operands.begin(),
6620                       ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
6621     } else if (CanAcceptVPTPredicationCode) {
6622       // For all other instructions, make sure only one of the two
6623       // predication operands is left behind, depending on whether we should
6624       // use the vector predication.
6625       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
6626         if (CanAcceptPredicationCode)
6627           Operands.erase(Operands.begin() + 2);
6628         else
6629           Operands.erase(Operands.begin() + 1);
6630       } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
6631         Operands.erase(Operands.begin() + 1);
6632       }
6633     }
6634   }
6635 
6636   if (VPTPredicationCode != ARMVCC::None) {
6637     bool usedVPTPredicationCode = false;
6638     for (unsigned I = 1; I < Operands.size(); ++I)
6639       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
6640         usedVPTPredicationCode = true;
6641     if (!usedVPTPredicationCode) {
6642       // If we have a VPT predication code and we haven't just turned it
6643       // into an operand, then it was a mistake for splitMnemonic to
6644       // separate it from the rest of the mnemonic in the first place,
6645       // and this may lead to wrong disassembly (e.g. scalar floating
6646       // point VCMPE is actually a different instruction from VCMP, so
6647       // we mustn't treat them the same). In that situation, glue it
6648       // back on.
6649       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
6650       Operands.erase(Operands.begin());
6651       Operands.insert(Operands.begin(),
6652                       ARMOperand::CreateToken(Mnemonic, NameLoc));
6653     }
6654   }
6655 
6656   // ARM mode 'blx' needs special handling, as the register operand version
6657   // is predicable, but the label operand version is not. So, we can't rely
6658   // on the Mnemonic based checking to correctly figure out when to put
6659   // a k_CondCode operand in the list. If we're trying to match the label
6660   // version, remove the k_CondCode operand here.
6661   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
6662       static_cast<ARMOperand &>(*Operands[2]).isImm())
6663     Operands.erase(Operands.begin() + 1);
6664 
6665   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
6666   // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
6667   // a single GPRPair reg operand is used in the .td file to replace the two
6668   // GPRs. However, when parsing from asm, the two GPRs cannot be
6669   // automatically expressed as a GPRPair, so we have to manually merge
6670   // them.
6671   // FIXME: We would really like to be able to tablegen'erate this.
6672   if (!isThumb() && Operands.size() > 4 &&
6673       (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
6674        Mnemonic == "stlexd")) {
6675     bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
6676     unsigned Idx = isLoad ? 2 : 3;
6677     ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
6678     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
6679 
6680     const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
6681     // Adjust only if Op1 and Op2 are GPRs.
6682     if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
6683         MRC.contains(Op2.getReg())) {
6684       unsigned Reg1 = Op1.getReg();
6685       unsigned Reg2 = Op2.getReg();
6686       unsigned Rt = MRI->getEncodingValue(Reg1);
6687       unsigned Rt2 = MRI->getEncodingValue(Reg2);
6688 
6689       // Rt2 must be Rt + 1 and Rt must be even.
6690       if (Rt + 1 != Rt2 || (Rt & 1)) {
6691         return Error(Op2.getStartLoc(),
6692                      isLoad ? "destination operands must be sequential"
6693                             : "source operands must be sequential");
6694       }
6695       unsigned NewReg = MRI->getMatchingSuperReg(
6696           Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
6697       Operands[Idx] =
6698           ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
6699       Operands.erase(Operands.begin() + Idx + 1);
6700     }
6701   }
6702 
6703   // GNU Assembler extension (compatibility).
6704   fixupGNULDRDAlias(Mnemonic, Operands);
6705 
6706   // FIXME: As said above, this is all a pretty gross hack.  This instruction
6707   // does not fit with other "subs" and tblgen.
6708   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
6709   // so the Mnemonic is the original name "subs" and delete the predicate
6710   // operand so it will match the table entry.
6711   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
6712       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6713       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
6714       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6715       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
6716       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6717     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
6718     Operands.erase(Operands.begin() + 1);
6719   }
6720   return false;
6721 }
6722 
6723 // Validate context-sensitive operand constraints.
6724 
6725 // Return 'true' if the register list contains non-low GPR registers other
6726 // than HiReg (which, if non-zero, is permitted); 'false' otherwise. If Reg is
6727 // in the register list, set 'containsReg' to true.
6728 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6729                                  unsigned Reg, unsigned HiReg,
6730                                  bool &containsReg) {
6731   containsReg = false;
6732   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6733     unsigned OpReg = Inst.getOperand(i).getReg();
6734     if (OpReg == Reg)
6735       containsReg = true;
6736     // Anything other than a low register isn't legal here.
6737     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6738       return true;
6739   }
6740   return false;
6741 }
6742 
6743 // Check if the specified register is in the register list of the inst,
6744 // starting at the indicated operand number.
6745 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6746   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6747     unsigned OpReg = Inst.getOperand(i).getReg();
6748     if (OpReg == Reg)
6749       return true;
6750   }
6751   return false;
6752 }
6753 
6754 // Return true if instruction has the interesting property of being
6755 // allowed in IT blocks, but not being predicable.
6756 static bool instIsBreakpoint(const MCInst &Inst) {
6757   return Inst.getOpcode() == ARM::tBKPT ||
6758          Inst.getOpcode() == ARM::BKPT ||
6759          Inst.getOpcode() == ARM::tHLT ||
6760          Inst.getOpcode() == ARM::HLT;
6761 }
6762 
6763 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6764                                        const OperandVector &Operands,
6765                                        unsigned ListNo, bool IsARPop) {
6766   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6767   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6768 
6769   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6770   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6771   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6772 
6773   if (!IsARPop && ListContainsSP)
6774     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6775                  "SP may not be in the register list");
6776   else if (ListContainsPC && ListContainsLR)
6777     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6778                  "PC and LR may not be in the register list simultaneously");
6779   return false;
6780 }
6781 
6782 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6783                                        const OperandVector &Operands,
6784                                        unsigned ListNo) {
6785   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6786   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6787 
6788   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6789   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6790 
6791   if (ListContainsSP && ListContainsPC)
6792     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6793                  "SP and PC may not be in the register list");
6794   else if (ListContainsSP)
6795     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6796                  "SP may not be in the register list");
6797   else if (ListContainsPC)
6798     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6799                  "PC may not be in the register list");
6800   return false;
6801 }
6802 
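// Enforce the ARMARM operand rules for LDRD/STRD. For example (illustrative),
// in ARM mode "ldrd r0, r1, [r2]" is accepted, while "ldrd r1, r2, [r3]" is
// rejected below because Rt must be even-numbered and Rt2 must be Rt + 1.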
6803 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
6804                                     const OperandVector &Operands,
6805                                     bool Load, bool ARMMode, bool Writeback) {
6806   unsigned RtIndex = Load || !Writeback ? 0 : 1;
6807   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
6808   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
6809 
6810   if (ARMMode) {
6811     // Rt can't be R14.
6812     if (Rt == 14)
6813       return Error(Operands[3]->getStartLoc(),
6814                   "Rt can't be R14");
6815 
6816     // Rt must be even-numbered.
6817     if ((Rt & 1) == 1)
6818       return Error(Operands[3]->getStartLoc(),
6819                    "Rt must be even-numbered");
6820 
6821     // Rt2 must be Rt + 1.
6822     if (Rt2 != Rt + 1) {
6823       if (Load)
6824         return Error(Operands[3]->getStartLoc(),
6825                      "destination operands must be sequential");
6826       else
6827         return Error(Operands[3]->getStartLoc(),
6828                      "source operands must be sequential");
6829     }
6830 
6831     // FIXME: Diagnose m == 15
6832     // FIXME: Diagnose ldrd with m == t || m == t2.
6833   }
6834 
6835   if (!ARMMode && Load) {
6836     if (Rt2 == Rt)
6837       return Error(Operands[3]->getStartLoc(),
6838                    "destination operands can't be identical");
6839   }
6840 
6841   if (Writeback) {
6842     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6843 
6844     if (Rn == Rt || Rn == Rt2) {
6845       if (Load)
6846         return Error(Operands[3]->getStartLoc(),
6847                      "base register needs to be different from destination "
6848                      "registers");
6849       else
6850         return Error(Operands[3]->getStartLoc(),
6851                      "source register and base register can't be identical");
6852     }
6853 
6854     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
6855     // (Except the immediate form of ldrd?)
6856   }
6857 
6858   return false;
6859 }
6860 
6861 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
6862   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
6863     if (ARM::isVpred(MCID.OpInfo[i].OperandType))
6864       return i;
6865   }
6866   return -1;
6867 }
6868 
6869 static bool isVectorPredicable(const MCInstrDesc &MCID) {
6870   return findFirstVectorPredOperandIdx(MCID) != -1;
6871 }
6872 
6873 // FIXME: We would really like to be able to tablegen'erate this.
6874 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6875                                        const OperandVector &Operands) {
6876   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6877   SMLoc Loc = Operands[0]->getStartLoc();
6878 
6879   // Check the IT block state first.
6880   // NOTE: BKPT and HLT instructions have the interesting property of being
6881   // allowed in IT blocks, but not being predicable. They just always execute.
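  // For example (illustrative), "it eq" followed by "addne r0, r0, r1" is
  // rejected below with "incorrect condition in IT block".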
6882   if (inITBlock() && !instIsBreakpoint(Inst)) {
6883     // The instruction must be predicable.
6884     if (!MCID.isPredicable())
6885       return Error(Loc, "instructions in IT block must be predicable");
6886     ARMCC::CondCodes Cond = ARMCC::CondCodes(
6887         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
6888     if (Cond != currentITCond()) {
6889       // Find the condition code Operand to get its SMLoc information.
6890       SMLoc CondLoc;
6891       for (unsigned I = 1; I < Operands.size(); ++I)
6892         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6893           CondLoc = Operands[I]->getStartLoc();
6894       return Error(CondLoc, "incorrect condition in IT block; got '" +
6895                                 StringRef(ARMCondCodeToString(Cond)) +
6896                                 "', but expected '" +
6897                                 ARMCondCodeToString(currentITCond()) + "'");
6898     }
6899   // Check for non-'al' condition codes outside of the IT block.
6900   } else if (isThumbTwo() && MCID.isPredicable() &&
6901              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6902              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6903              Inst.getOpcode() != ARM::t2Bcc &&
6904              Inst.getOpcode() != ARM::t2BFic) {
6905     return Error(Loc, "predicated instructions must be in IT block");
6906   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
6907              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6908                  ARMCC::AL) {
6909     return Warning(Loc, "predicated instructions should be in IT block");
6910   } else if (!MCID.isPredicable()) {
6911     // Check the instruction doesn't have a predicate operand anyway
6912     // that it's not allowed to use. Sometimes this happens in order
6913     // to keep instructions the same shape even though one cannot
6914     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
6915     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
6916       if (MCID.OpInfo[i].isPredicate()) {
6917         if (Inst.getOperand(i).getImm() != ARMCC::AL)
6918           return Error(Loc, "instruction is not predicable");
6919         break;
6920       }
6921     }
6922   }
6923 
6924   // PC-setting instructions in an IT block, but not the last instruction of
6925   // the block, are UNPREDICTABLE.
6926   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
    return Error(Loc, "instruction must be outside of IT block or the last "
                      "instruction in an IT block");
6928   }
6929 
6930   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
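    // Each instruction in an MVE VPT block must carry the vector predicate
    // ('t' or 'e') selected by the block's mask, e.g. 'vpst' followed by
    // 'vaddt.i32', not a bare 'vadd.i32'.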
6931     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
6932     if (!isVectorPredicable(MCID))
6933       return Error(Loc, "instruction in VPT block must be predicable");
    unsigned Pred =
        Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
6935     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
6936     if (Pred != VPTPred) {
6937       SMLoc PredLoc;
6938       for (unsigned I = 1; I < Operands.size(); ++I)
6939         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
6940           PredLoc = Operands[I]->getStartLoc();
6941       return Error(PredLoc, "incorrect predication in VPT block; got '" +
6942                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
6943                    "', but expected '" +
6944                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
6945     }
6946   }
6947   else if (isVectorPredicable(MCID) &&
6948            Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
6949            ARMVCC::None)
6950     return Error(Loc, "VPT predicated instructions must be in VPT block");
6951 
6952   const unsigned Opcode = Inst.getOpcode();
6953   switch (Opcode) {
6954   case ARM::t2IT: {
6955     // Encoding is unpredictable if it ever results in a notional 'NV'
6956     // predicate. Since we don't parse 'NV' directly this means an 'AL'
6957     // predicate with an "else" mask bit.
6958     unsigned Cond = Inst.getOperand(0).getImm();
6959     unsigned Mask = Inst.getOperand(1).getImm();
6960 
6961     // Conditions only allowing a 't' are those with no set bit except
6962     // the lowest-order one that indicates the end of the sequence. In
6963     // other words, powers of 2.
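    // e.g. 'it al' is accepted, but 'ite al' would put the notional NV
    // condition in the 'e' slot and is rejected.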
6964     if (Cond == ARMCC::AL && countPopulation(Mask) != 1)
6965       return Error(Loc, "unpredictable IT predicate sequence");
6966     break;
6967   }
6968   case ARM::LDRD:
6969     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
6970                          /*Writeback*/false))
6971       return true;
6972     break;
6973   case ARM::LDRD_PRE:
6974   case ARM::LDRD_POST:
6975     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
6976                          /*Writeback*/true))
6977       return true;
6978     break;
6979   case ARM::t2LDRDi8:
6980     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
6981                          /*Writeback*/false))
6982       return true;
6983     break;
6984   case ARM::t2LDRD_PRE:
6985   case ARM::t2LDRD_POST:
6986     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
6987                          /*Writeback*/true))
6988       return true;
6989     break;
6990   case ARM::t2BXJ: {
6991     const unsigned RmReg = Inst.getOperand(0).getReg();
6992     // Rm = SP is no longer unpredictable in v8-A
6993     if (RmReg == ARM::SP && !hasV8Ops())
6994       return Error(Operands[2]->getStartLoc(),
6995                    "r13 (SP) is an unpredictable operand to BXJ");
6996     return false;
6997   }
6998   case ARM::STRD:
6999     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7000                          /*Writeback*/false))
7001       return true;
7002     break;
7003   case ARM::STRD_PRE:
7004   case ARM::STRD_POST:
7005     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
7006                          /*Writeback*/true))
7007       return true;
7008     break;
7009   case ARM::t2STRD_PRE:
7010   case ARM::t2STRD_POST:
7011     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
7012                          /*Writeback*/true))
7013       return true;
7014     break;
7015   case ARM::STR_PRE_IMM:
7016   case ARM::STR_PRE_REG:
7017   case ARM::t2STR_PRE:
7018   case ARM::STR_POST_IMM:
7019   case ARM::STR_POST_REG:
7020   case ARM::t2STR_POST:
7021   case ARM::STRH_PRE:
7022   case ARM::t2STRH_PRE:
7023   case ARM::STRH_POST:
7024   case ARM::t2STRH_POST:
7025   case ARM::STRB_PRE_IMM:
7026   case ARM::STRB_PRE_REG:
7027   case ARM::t2STRB_PRE:
7028   case ARM::STRB_POST_IMM:
7029   case ARM::STRB_POST_REG:
7030   case ARM::t2STRB_POST: {
7031     // Rt must be different from Rn.
7032     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7033     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7034 
7035     if (Rt == Rn)
7036       return Error(Operands[3]->getStartLoc(),
7037                    "source register and base register can't be identical");
7038     return false;
7039   }
7040   case ARM::LDR_PRE_IMM:
7041   case ARM::LDR_PRE_REG:
7042   case ARM::t2LDR_PRE:
7043   case ARM::LDR_POST_IMM:
7044   case ARM::LDR_POST_REG:
7045   case ARM::t2LDR_POST:
7046   case ARM::LDRH_PRE:
7047   case ARM::t2LDRH_PRE:
7048   case ARM::LDRH_POST:
7049   case ARM::t2LDRH_POST:
7050   case ARM::LDRSH_PRE:
7051   case ARM::t2LDRSH_PRE:
7052   case ARM::LDRSH_POST:
7053   case ARM::t2LDRSH_POST:
7054   case ARM::LDRB_PRE_IMM:
7055   case ARM::LDRB_PRE_REG:
7056   case ARM::t2LDRB_PRE:
7057   case ARM::LDRB_POST_IMM:
7058   case ARM::LDRB_POST_REG:
7059   case ARM::t2LDRB_POST:
7060   case ARM::LDRSB_PRE:
7061   case ARM::t2LDRSB_PRE:
7062   case ARM::LDRSB_POST:
7063   case ARM::t2LDRSB_POST: {
7064     // Rt must be different from Rn.
7065     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7066     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7067 
7068     if (Rt == Rn)
7069       return Error(Operands[3]->getStartLoc(),
7070                    "destination register and base register can't be identical");
7071     return false;
7072   }
7073   case ARM::SBFX:
7074   case ARM::t2SBFX:
7075   case ARM::UBFX:
7076   case ARM::t2UBFX: {
7077     // Width must be in range [1, 32-lsb].
7078     unsigned LSB = Inst.getOperand(2).getImm();
7079     unsigned Widthm1 = Inst.getOperand(3).getImm();
7080     if (Widthm1 >= 32 - LSB)
7081       return Error(Operands[5]->getStartLoc(),
7082                    "bitfield width must be in range [1,32-lsb]");
7083     return false;
7084   }
7085   // Notionally handles ARM::tLDMIA_UPD too.
7086   case ARM::tLDMIA: {
7087     // If we're parsing Thumb2, the .w variant is available and handles
7088     // most cases that are normally illegal for a Thumb1 LDM instruction.
7089     // We'll make the transformation in processInstruction() if necessary.
7090     //
7091     // Thumb LDM instructions are writeback iff the base register is not
7092     // in the register list.
7093     unsigned Rn = Inst.getOperand(0).getReg();
7094     bool HasWritebackToken =
7095         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7096          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7097     bool ListContainsBase;
7098     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7099       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7100                    "registers must be in range r0-r7");
7101     // If we should have writeback, then there should be a '!' token.
7102     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7103       return Error(Operands[2]->getStartLoc(),
7104                    "writeback operator '!' expected");
7105     // If we should not have writeback, there must not be a '!'. This is
7106     // true even for the 32-bit wide encodings.
7107     if (ListContainsBase && HasWritebackToken)
7108       return Error(Operands[3]->getStartLoc(),
7109                    "writeback operator '!' not allowed when base register "
7110                    "in register list");
7111 
7112     if (validatetLDMRegList(Inst, Operands, 3))
7113       return true;
7114     break;
7115   }
7116   case ARM::LDMIA_UPD:
7117   case ARM::LDMDB_UPD:
7118   case ARM::LDMIB_UPD:
7119   case ARM::LDMDA_UPD:
7120     // ARM variants loading and updating the same register are only officially
7121     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
7122     if (!hasV7Ops())
7123       break;
7124     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7125       return Error(Operands.back()->getStartLoc(),
7126                    "writeback register not allowed in register list");
7127     break;
7128   case ARM::t2LDMIA:
7129   case ARM::t2LDMDB:
7130     if (validatetLDMRegList(Inst, Operands, 3))
7131       return true;
7132     break;
7133   case ARM::t2STMIA:
7134   case ARM::t2STMDB:
7135     if (validatetSTMRegList(Inst, Operands, 3))
7136       return true;
7137     break;
7138   case ARM::t2LDMIA_UPD:
7139   case ARM::t2LDMDB_UPD:
7140   case ARM::t2STMIA_UPD:
7141   case ARM::t2STMDB_UPD:
7142     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7143       return Error(Operands.back()->getStartLoc(),
7144                    "writeback register not allowed in register list");
7145 
7146     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
7147       if (validatetLDMRegList(Inst, Operands, 3))
7148         return true;
7149     } else {
7150       if (validatetSTMRegList(Inst, Operands, 3))
7151         return true;
7152     }
7153     break;
7154 
7155   case ARM::sysLDMIA_UPD:
7156   case ARM::sysLDMDA_UPD:
7157   case ARM::sysLDMDB_UPD:
7158   case ARM::sysLDMIB_UPD:
7159     if (!listContainsReg(Inst, 3, ARM::PC))
7160       return Error(Operands[4]->getStartLoc(),
7161                    "writeback register only allowed on system LDM "
7162                    "if PC in register-list");
7163     break;
7164   case ARM::sysSTMIA_UPD:
7165   case ARM::sysSTMDA_UPD:
7166   case ARM::sysSTMDB_UPD:
7167   case ARM::sysSTMIB_UPD:
7168     return Error(Operands[2]->getStartLoc(),
7169                  "system STM cannot have writeback register");
7170   case ARM::tMUL:
7171     // The second source operand must be the same register as the destination
7172     // operand.
7173     //
7174     // In this case, we must directly check the parsed operands because the
7175     // cvtThumbMultiply() function is written in such a way that it guarantees
7176     // this first statement is always true for the new Inst.  Essentially, the
7177     // destination is unconditionally copied into the second source operand
7178     // without checking to see if it matches what we actually parsed.
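    // e.g. 'muls r0, r1, r0' is encodable in Thumb1, but 'muls r0, r1, r2'
    // is not, since the destination must match one of the sources.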
7179     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
7180                                  ((ARMOperand &)*Operands[5]).getReg()) &&
7181         (((ARMOperand &)*Operands[3]).getReg() !=
7182          ((ARMOperand &)*Operands[4]).getReg())) {
7183       return Error(Operands[3]->getStartLoc(),
7184                    "destination register must match source register");
7185     }
7186     break;
7187 
  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
  // so only issue a diagnostic for Thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
7191   case ARM::tPOP: {
7192     bool ListContainsBase;
7193     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
7194         !isThumbTwo())
7195       return Error(Operands[2]->getStartLoc(),
7196                    "registers must be in range r0-r7 or pc");
7197     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
7198       return true;
7199     break;
7200   }
7201   case ARM::tPUSH: {
7202     bool ListContainsBase;
7203     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
7204         !isThumbTwo())
7205       return Error(Operands[2]->getStartLoc(),
7206                    "registers must be in range r0-r7 or lr");
7207     if (validatetSTMRegList(Inst, Operands, 2))
7208       return true;
7209     break;
7210   }
7211   case ARM::tSTMIA_UPD: {
7212     bool ListContainsBase, InvalidLowList;
7213     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
7214                                           0, ListContainsBase);
7215     if (InvalidLowList && !isThumbTwo())
7216       return Error(Operands[4]->getStartLoc(),
7217                    "registers must be in range r0-r7");
7218 
7219     // This would be converted to a 32-bit stm, but that's not valid if the
7220     // writeback register is in the list.
7221     if (InvalidLowList && ListContainsBase)
7222       return Error(Operands[4]->getStartLoc(),
7223                    "writeback operator '!' not allowed when base register "
7224                    "in register list");
7225 
7226     if (validatetSTMRegList(Inst, Operands, 4))
7227       return true;
7228     break;
7229   }
7230   case ARM::tADDrSP:
7231     // If the non-SP source operand and the destination operand are not the
7232     // same, we need thumb2 (for the wide encoding), or we have an error.
7233     if (!isThumbTwo() &&
7234         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7235       return Error(Operands[4]->getStartLoc(),
7236                    "source register must be the same as destination");
7237     }
7238     break;
7239 
7240   case ARM::t2ADDri:
7241   case ARM::t2ADDri12:
7242   case ARM::t2ADDrr:
7243   case ARM::t2ADDrs:
7244   case ARM::t2SUBri:
7245   case ARM::t2SUBri12:
7246   case ARM::t2SUBrr:
7247   case ARM::t2SUBrs:
7248     if (Inst.getOperand(0).getReg() == ARM::SP &&
7249         Inst.getOperand(1).getReg() != ARM::SP)
7250       return Error(Operands[4]->getStartLoc(),
7251                    "source register must be sp if destination is sp");
7252     break;
7253 
7254   // Final range checking for Thumb unconditional branch instructions.
7255   case ARM::tB:
7256     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
7257       return Error(Operands[2]->getStartLoc(), "branch target out of range");
7258     break;
7259   case ARM::t2B: {
7260     int op = (Operands[2]->isImm()) ? 2 : 3;
7261     if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
7262       return Error(Operands[op]->getStartLoc(), "branch target out of range");
7263     break;
7264   }
7265   // Final range checking for Thumb conditional branch instructions.
7266   case ARM::tBcc:
7267     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
7268       return Error(Operands[2]->getStartLoc(), "branch target out of range");
7269     break;
7270   case ARM::t2Bcc: {
7271     int Op = (Operands[2]->isImm()) ? 2 : 3;
7272     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
7273       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
7274     break;
7275   }
7276   case ARM::tCBZ:
7277   case ARM::tCBNZ: {
7278     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
7279       return Error(Operands[2]->getStartLoc(), "branch target out of range");
7280     break;
7281   }
7282   case ARM::MOVi16:
7283   case ARM::MOVTi16:
7284   case ARM::t2MOVi16:
  case ARM::t2MOVTi16: {
7287     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
7288     // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16: as part of the expression.  We don't
7290     // want the behavior of silently truncating, which can be unexpected and
7291     // lead to bugs that are difficult to find since this is an easy mistake
7292     // to make.
7293     int i = (Operands[3]->isImm()) ? 3 : 4;
7294     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
7295     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7296     if (CE) break;
7297     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7298     if (!E) break;
7299     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
7300     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
7301                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
7302       return Error(
7303           Op.getStartLoc(),
7304           "immediate expression for mov requires :lower16: or :upper16");
7305     break;
7306   }
7307   case ARM::HINT:
7308   case ARM::t2HINT: {
7309     unsigned Imm8 = Inst.getOperand(0).getImm();
7310     unsigned Pred = Inst.getOperand(1).getImm();
7311     // ESB is not predicable (pred must be AL). Without the RAS extension, this
7312     // behaves as any other unallocated hint.
7313     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
7314       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
7315                                                "predicable, but condition "
7316                                                "code specified");
7317     if (Imm8 == 0x14 && Pred != ARMCC::AL)
7318       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
7319                                                "predicable, but condition "
7320                                                "code specified");
7321     break;
7322   }
7323   case ARM::t2WLS: {
7324     int idx = Opcode == ARM::t2WLS ? 3 : 4;
7325     if (!static_cast<ARMOperand &>(*Operands[idx]).isUnsignedOffset<11, 1>())
7326       return Error(Operands[idx]->getStartLoc(),
7327                    "loop end is out of range or not a positive multiple of 2");
7328     break;
7329   }
7330   case ARM::t2LEUpdate: {
7331     if (Inst.getOperand(2).isImm() &&
7332         !(Inst.getOperand(2).getImm() < 0 &&
7333           Inst.getOperand(2).getImm() >= -4094 &&
7334           (Inst.getOperand(2).getImm() & 1) == 0))
7335       return Error(Operands[2]->getStartLoc(),
7336                    "loop start is out of range or not a negative multiple of 2");
7337     break;
7338   }
7339   case ARM::t2BFi:
7340   case ARM::t2BFr:
7341   case ARM::t2BFLi:
7342   case ARM::t2BFLr: {
7343     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
7344         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
7345       return Error(Operands[2]->getStartLoc(),
7346                    "branch location out of range or not a multiple of 2");
7347 
7348     if (Opcode == ARM::t2BFi) {
7349       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
7350         return Error(Operands[3]->getStartLoc(),
7351                      "branch target out of range or not a multiple of 2");
7352     } else if (Opcode == ARM::t2BFLi) {
7353       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
7354         return Error(Operands[3]->getStartLoc(),
7355                      "branch target out of range or not a multiple of 2");
7356     }
7357     break;
7358   }
7359   case ARM::t2BFic: {
7360     if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
7361         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
7362       return Error(Operands[1]->getStartLoc(),
7363                    "branch location out of range or not a multiple of 2");
7364 
7365     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
7366       return Error(Operands[2]->getStartLoc(),
7367                    "branch target out of range or not a multiple of 2");
7368 
7369     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
7370            "branch location and else branch target should either both be "
7371            "immediates or both labels");
7372 
7373     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
7374       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
7375       if (Diff != 4 && Diff != 2)
7376         return Error(
7377             Operands[3]->getStartLoc(),
7378             "else branch target must be 2 or 4 greater than the branch location");
7379     }
7380     break;
7381   }
7382   case ARM::t2CLRM: {
7383     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
7384       if (Inst.getOperand(i).isReg() &&
7385           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
7386               Inst.getOperand(i).getReg())) {
7387         return Error(Operands[2]->getStartLoc(),
7388                      "invalid register in register list. Valid registers are "
7389                      "r0-r12, lr/r14 and APSR.");
7390       }
7391     }
7392     break;
7393   }
7394   case ARM::DSB:
  case ARM::t2DSB: {
7397     if (Inst.getNumOperands() < 2)
7398       break;
7399 
7400     unsigned Option = Inst.getOperand(0).getImm();
7401     unsigned Pred = Inst.getOperand(1).getImm();
7402 
7403     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
7404     if (Option == 0 && Pred != ARMCC::AL)
7405       return Error(Operands[1]->getStartLoc(),
7406                    "instruction 'ssbb' is not predicable, but condition code "
7407                    "specified");
7408     if (Option == 4 && Pred != ARMCC::AL)
7409       return Error(Operands[1]->getStartLoc(),
7410                    "instruction 'pssbb' is not predicable, but condition code "
7411                    "specified");
7412     break;
7413   }
7414   case ARM::VMOVRRS: {
7415     // Source registers must be sequential.
7416     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7417     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7418     if (Sm1 != Sm + 1)
7419       return Error(Operands[5]->getStartLoc(),
7420                    "source operands must be sequential");
7421     break;
7422   }
7423   case ARM::VMOVSRR: {
7424     // Destination registers must be sequential.
7425     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7426     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7427     if (Sm1 != Sm + 1)
7428       return Error(Operands[3]->getStartLoc(),
7429                    "destination operands must be sequential");
7430     break;
7431   }
7432   case ARM::VLDMDIA:
7433   case ARM::VSTMDIA: {
7434     ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
7435     auto &RegList = Op.getRegList();
7436     if (RegList.size() < 1 || RegList.size() > 16)
7437       return Error(Operands[3]->getStartLoc(),
7438                    "list of registers must be at least 1 and at most 16");
7439     break;
7440   }
7441   }
7442 
7443   return false;
7444 }
7445 
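// Map a NEON VST*_Asm pseudo opcode (which exists only for assembly matching)
// to the real MC opcode, and set Spacing to the distance between successive D
// registers in the register list (1 for the 'd' forms, usually 2 for the 'q'
// forms).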
7446 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
7447   switch(Opc) {
7448   default: llvm_unreachable("unexpected opcode!");
7449   // VST1LN
7450   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
7451   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
7452   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
7453   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
7454   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
7455   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
7456   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
7457   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
7458   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
7459 
7460   // VST2LN
7461   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
7462   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
7463   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
7464   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
7465   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
7466 
7467   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
7468   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
7469   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
7470   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
7471   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
7472 
7473   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
7474   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
7475   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
7476   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
7477   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
7478 
7479   // VST3LN
7480   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
7481   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
7482   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
7483   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
7484   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
7485   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
7486   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
7487   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
7488   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
7489   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
7490   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
7491   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
7492   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
7493   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
7494   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
7495 
7496   // VST3
7497   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
7498   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
7499   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
7500   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
7501   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
7502   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
7503   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
7504   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
7505   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
7506   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
7507   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
7508   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
7509   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
7510   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
7511   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
7512   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
7513   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
7514   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
7515 
7516   // VST4LN
7517   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
7518   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
7519   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
7520   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
7521   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
7522   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
7523   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
7524   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
7525   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
7526   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
7527   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
7528   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
7529   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
7530   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
7531   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
7532 
7533   // VST4
7534   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
7535   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
7536   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
7537   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
7538   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
7539   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
7540   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
7541   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
7542   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
7543   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
7544   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
7545   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
7546   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
7547   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
7548   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
7549   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
7550   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
7551   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
7552   }
7553 }
7554 
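// Counterpart of getRealVSTOpcode for the NEON VLD*_Asm pseudo opcodes.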
7555 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
7556   switch(Opc) {
7557   default: llvm_unreachable("unexpected opcode!");
7558   // VLD1LN
7559   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
7560   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
7561   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
7562   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
7563   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
7564   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
7565   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
7566   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
7567   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
7568 
7569   // VLD2LN
7570   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
7571   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
7572   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
7573   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
7574   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
7575   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
7576   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
7577   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
7578   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
7579   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
7580   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
7581   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
7582   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
7583   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
7584   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
7585 
7586   // VLD3DUP
7587   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
7588   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
7589   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
7590   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
7591   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
7592   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
7593   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
7594   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
7595   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
7596   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
7597   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
7598   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
7599   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
7600   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
7601   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
7602   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
7603   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
7604   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
7605 
7606   // VLD3LN
7607   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
7608   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
7609   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
7610   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
7611   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
7612   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
7613   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
7614   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
7615   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
7616   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
7617   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
7618   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
7619   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
7620   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
7621   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
7622 
7623   // VLD3
7624   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
7625   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
7626   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
7627   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
7628   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
7629   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
7630   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
7631   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
7632   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
7633   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
7634   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
7635   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
7636   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
7637   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
7638   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
7639   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
7640   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
7641   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
7642 
7643   // VLD4LN
7644   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
7645   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
7646   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
7647   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
7648   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
7649   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
7650   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
7651   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
7652   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
7653   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
7654   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
7655   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
7656   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
7657   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
7658   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
7659 
7660   // VLD4DUP
7661   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
7662   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
7663   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
7664   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
7665   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
7666   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
7667   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
7668   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
7669   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
7670   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
7671   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
7672   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
7673   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
7674   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
7675   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
7676   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
7677   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
7678   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
7679 
7680   // VLD4
7681   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
7682   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
7683   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
7684   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
7685   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
7686   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
7687   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
7688   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
7689   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
7690   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
7691   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
7692   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
7693   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
7694   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
7695   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
7696   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
7697   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
7698   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
7699   }
7700 }
7701 
7702 bool ARMAsmParser::processInstruction(MCInst &Inst,
7703                                       const OperandVector &Operands,
7704                                       MCStreamer &Out) {
7705   // Check if we have the wide qualifier, because if it's present we
7706   // must avoid selecting a 16-bit thumb instruction.
7707   bool HasWideQualifier = false;
7708   for (auto &Op : Operands) {
7709     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
7710     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
7711       HasWideQualifier = true;
7712       break;
7713     }
7714   }
7715 
7716   switch (Inst.getOpcode()) {
7717   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
7718   case ARM::LDRT_POST:
7719   case ARM::LDRBT_POST: {
7720     const unsigned Opcode =
7721       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
7722                                            : ARM::LDRBT_POST_IMM;
7723     MCInst TmpInst;
7724     TmpInst.setOpcode(Opcode);
7725     TmpInst.addOperand(Inst.getOperand(0));
7726     TmpInst.addOperand(Inst.getOperand(1));
7727     TmpInst.addOperand(Inst.getOperand(1));
7728     TmpInst.addOperand(MCOperand::createReg(0));
7729     TmpInst.addOperand(MCOperand::createImm(0));
7730     TmpInst.addOperand(Inst.getOperand(2));
7731     TmpInst.addOperand(Inst.getOperand(3));
7732     Inst = TmpInst;
7733     return true;
7734   }
7735   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
7736   case ARM::STRT_POST:
7737   case ARM::STRBT_POST: {
7738     const unsigned Opcode =
7739       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
7740                                            : ARM::STRBT_POST_IMM;
7741     MCInst TmpInst;
7742     TmpInst.setOpcode(Opcode);
7743     TmpInst.addOperand(Inst.getOperand(1));
7744     TmpInst.addOperand(Inst.getOperand(0));
7745     TmpInst.addOperand(Inst.getOperand(1));
7746     TmpInst.addOperand(MCOperand::createReg(0));
7747     TmpInst.addOperand(MCOperand::createImm(0));
7748     TmpInst.addOperand(Inst.getOperand(2));
7749     TmpInst.addOperand(Inst.getOperand(3));
7750     Inst = TmpInst;
7751     return true;
7752   }
7753   // Alias for alternate form of 'ADR Rd, #imm' instruction.
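  // e.g. 'add r0, pc, #8' is rewritten here as the equivalent 'adr r0, #8'.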
7754   case ARM::ADDri: {
7755     if (Inst.getOperand(1).getReg() != ARM::PC ||
7756         Inst.getOperand(5).getReg() != 0 ||
7757         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
7758       return false;
7759     MCInst TmpInst;
7760     TmpInst.setOpcode(ARM::ADR);
7761     TmpInst.addOperand(Inst.getOperand(0));
7762     if (Inst.getOperand(2).isImm()) {
      // The immediate (mod_imm) will be in its encoded form; we must decode
      // it before passing it to the ADR instruction.
7765       unsigned Enc = Inst.getOperand(2).getImm();
7766       TmpInst.addOperand(MCOperand::createImm(
7767         ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
7768     } else {
7769       // Turn PC-relative expression into absolute expression.
7770       // Reading PC provides the start of the current instruction + 8 and
7771       // the transform to adr is biased by that.
7772       MCSymbol *Dot = getContext().createTempSymbol();
7773       Out.EmitLabel(Dot);
7774       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
7775       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
7776                                                      MCSymbolRefExpr::VK_None,
7777                                                      getContext());
7778       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
7779       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
7780                                                      getContext());
7781       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
7782                                                         getContext());
7783       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
7784     }
7785     TmpInst.addOperand(Inst.getOperand(3));
7786     TmpInst.addOperand(Inst.getOperand(4));
7787     Inst = TmpInst;
7788     return true;
7789   }
7790   // Aliases for alternate PC+imm syntax of LDR instructions.
7791   case ARM::t2LDRpcrel:
7792     // Select the narrow version if the immediate will fit.
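    // e.g. a small positive offset such as 'ldr r0, [pc, #16]' can use the
    // 16-bit tLDRpci encoding; larger offsets or an explicit '.w' force the
    // 32-bit t2LDRpci.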
7793     if (Inst.getOperand(1).getImm() > 0 &&
7794         Inst.getOperand(1).getImm() <= 0xff &&
7795         !HasWideQualifier)
7796       Inst.setOpcode(ARM::tLDRpci);
7797     else
7798       Inst.setOpcode(ARM::t2LDRpci);
7799     return true;
7800   case ARM::t2LDRBpcrel:
7801     Inst.setOpcode(ARM::t2LDRBpci);
7802     return true;
7803   case ARM::t2LDRHpcrel:
7804     Inst.setOpcode(ARM::t2LDRHpci);
7805     return true;
7806   case ARM::t2LDRSBpcrel:
7807     Inst.setOpcode(ARM::t2LDRSBpci);
7808     return true;
7809   case ARM::t2LDRSHpcrel:
7810     Inst.setOpcode(ARM::t2LDRSHpci);
7811     return true;
7812   case ARM::LDRConstPool:
7813   case ARM::tLDRConstPool:
7814   case ARM::t2LDRConstPool: {
    // The pseudo instruction 'ldr rt, =immediate' is converted to a
    // 'mov rt, immediate' if the immediate is known and representable;
    // otherwise we create a constant pool entry that we load from.
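    // e.g. in ARM state 'ldr r0, =1' becomes 'mov r0, #1', while
    // 'ldr r0, =0x12345678' falls back to a literal-pool load.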
7818     MCInst TmpInst;
7819     if (Inst.getOpcode() == ARM::LDRConstPool)
7820       TmpInst.setOpcode(ARM::LDRi12);
7821     else if (Inst.getOpcode() == ARM::tLDRConstPool)
7822       TmpInst.setOpcode(ARM::tLDRpci);
7823     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
7824       TmpInst.setOpcode(ARM::t2LDRpci);
7825     const ARMOperand &PoolOperand =
7826       (HasWideQualifier ?
7827        static_cast<ARMOperand &>(*Operands[4]) :
7828        static_cast<ARMOperand &>(*Operands[3]));
7829     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
7830     // If SubExprVal is a constant we may be able to use a MOV
7831     if (isa<MCConstantExpr>(SubExprVal) &&
7832         Inst.getOperand(0).getReg() != ARM::PC &&
7833         Inst.getOperand(0).getReg() != ARM::SP) {
7834       int64_t Value =
7835         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7836       bool UseMov  = true;
7837       bool MovHasS = true;
7838       if (Inst.getOpcode() == ARM::LDRConstPool) {
7839         // ARM Constant
7840         if (ARM_AM::getSOImmVal(Value) != -1) {
7841           Value = ARM_AM::getSOImmVal(Value);
7842           TmpInst.setOpcode(ARM::MOVi);
7843         }
7844         else if (ARM_AM::getSOImmVal(~Value) != -1) {
7845           Value = ARM_AM::getSOImmVal(~Value);
7846           TmpInst.setOpcode(ARM::MVNi);
7847         }
7848         else if (hasV6T2Ops() &&
                 Value >= 0 && Value < 65536) {
7850           TmpInst.setOpcode(ARM::MOVi16);
7851           MovHasS = false;
7852         }
7853         else
7854           UseMov = false;
7855       }
7856       else {
7857         // Thumb/Thumb2 Constant
7858         if (hasThumb2() &&
7859             ARM_AM::getT2SOImmVal(Value) != -1)
7860           TmpInst.setOpcode(ARM::t2MOVi);
7861         else if (hasThumb2() &&
7862                  ARM_AM::getT2SOImmVal(~Value) != -1) {
7863           TmpInst.setOpcode(ARM::t2MVNi);
7864           Value = ~Value;
7865         }
7866         else if (hasV8MBaseline() &&
                 Value >= 0 && Value < 65536) {
7868           TmpInst.setOpcode(ARM::t2MOVi16);
7869           MovHasS = false;
7870         }
7871         else
7872           UseMov = false;
7873       }
7874       if (UseMov) {
7875         TmpInst.addOperand(Inst.getOperand(0));           // Rt
7876         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
7877         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7878         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7879         if (MovHasS)
7880           TmpInst.addOperand(MCOperand::createReg(0));    // S
7881         Inst = TmpInst;
7882         return true;
7883       }
7884     }
    // No opportunity to use MOV/MVN; create a constant pool entry instead.
7886     const MCExpr *CPLoc =
7887       getTargetStreamer().addConstantPoolEntry(SubExprVal,
7888                                                PoolOperand.getStartLoc());
7889     TmpInst.addOperand(Inst.getOperand(0));           // Rt
7890     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7891     if (TmpInst.getOpcode() == ARM::LDRi12)
7892       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
7893     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7894     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7895     Inst = TmpInst;
7896     return true;
7897   }
7898   // Handle NEON VST complex aliases.
7899   case ARM::VST1LNdWB_register_Asm_8:
7900   case ARM::VST1LNdWB_register_Asm_16:
7901   case ARM::VST1LNdWB_register_Asm_32: {
7902     MCInst TmpInst;
7903     // Shuffle the operands around so the lane index operand is in the
7904     // right place.
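    // The Asm pseudo lists Vd and the lane first; the real instruction
    // expects the writeback base, address and Rm first, so rebuild the
    // operand list in that order.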
7905     unsigned Spacing;
7906     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7907     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7908     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7909     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7910     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7911     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7912     TmpInst.addOperand(Inst.getOperand(1)); // lane
7913     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7914     TmpInst.addOperand(Inst.getOperand(6));
7915     Inst = TmpInst;
7916     return true;
7917   }
7918 
7919   case ARM::VST2LNdWB_register_Asm_8:
7920   case ARM::VST2LNdWB_register_Asm_16:
7921   case ARM::VST2LNdWB_register_Asm_32:
7922   case ARM::VST2LNqWB_register_Asm_16:
7923   case ARM::VST2LNqWB_register_Asm_32: {
7924     MCInst TmpInst;
7925     // Shuffle the operands around so the lane index operand is in the
7926     // right place.
7927     unsigned Spacing;
7928     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7929     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7930     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7931     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7932     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7933     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7934     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7935                                             Spacing));
7936     TmpInst.addOperand(Inst.getOperand(1)); // lane
7937     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7938     TmpInst.addOperand(Inst.getOperand(6));
7939     Inst = TmpInst;
7940     return true;
7941   }
7942 
7943   case ARM::VST3LNdWB_register_Asm_8:
7944   case ARM::VST3LNdWB_register_Asm_16:
7945   case ARM::VST3LNdWB_register_Asm_32:
7946   case ARM::VST3LNqWB_register_Asm_16:
7947   case ARM::VST3LNqWB_register_Asm_32: {
7948     MCInst TmpInst;
7949     // Shuffle the operands around so the lane index operand is in the
7950     // right place.
7951     unsigned Spacing;
7952     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7953     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7954     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7955     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7956     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7957     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7958     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7959                                             Spacing));
7960     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7961                                             Spacing * 2));
7962     TmpInst.addOperand(Inst.getOperand(1)); // lane
7963     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7964     TmpInst.addOperand(Inst.getOperand(6));
7965     Inst = TmpInst;
7966     return true;
7967   }
7968 
7969   case ARM::VST4LNdWB_register_Asm_8:
7970   case ARM::VST4LNdWB_register_Asm_16:
7971   case ARM::VST4LNdWB_register_Asm_32:
7972   case ARM::VST4LNqWB_register_Asm_16:
7973   case ARM::VST4LNqWB_register_Asm_32: {
7974     MCInst TmpInst;
7975     // Shuffle the operands around so the lane index operand is in the
7976     // right place.
7977     unsigned Spacing;
7978     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7979     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7980     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7981     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7982     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7983     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7984     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7985                                             Spacing));
7986     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7987                                             Spacing * 2));
7988     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7989                                             Spacing * 3));
7990     TmpInst.addOperand(Inst.getOperand(1)); // lane
7991     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7992     TmpInst.addOperand(Inst.getOperand(6));
7993     Inst = TmpInst;
7994     return true;
7995   }
7996 
7997   case ARM::VST1LNdWB_fixed_Asm_8:
7998   case ARM::VST1LNdWB_fixed_Asm_16:
7999   case ARM::VST1LNdWB_fixed_Asm_32: {
8000     MCInst TmpInst;
8001     // Shuffle the operands around so the lane index operand is in the
8002     // right place.
8003     unsigned Spacing;
8004     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8005     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8006     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8007     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8008     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8009     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8010     TmpInst.addOperand(Inst.getOperand(1)); // lane
8011     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8012     TmpInst.addOperand(Inst.getOperand(5));
8013     Inst = TmpInst;
8014     return true;
8015   }
8016 
8017   case ARM::VST2LNdWB_fixed_Asm_8:
8018   case ARM::VST2LNdWB_fixed_Asm_16:
8019   case ARM::VST2LNdWB_fixed_Asm_32:
8020   case ARM::VST2LNqWB_fixed_Asm_16:
8021   case ARM::VST2LNqWB_fixed_Asm_32: {
8022     MCInst TmpInst;
8023     // Shuffle the operands around so the lane index operand is in the
8024     // right place.
8025     unsigned Spacing;
8026     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8027     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8028     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8029     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8030     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8031     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8032     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8033                                             Spacing));
8034     TmpInst.addOperand(Inst.getOperand(1)); // lane
8035     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8036     TmpInst.addOperand(Inst.getOperand(5));
8037     Inst = TmpInst;
8038     return true;
8039   }
8040 
8041   case ARM::VST3LNdWB_fixed_Asm_8:
8042   case ARM::VST3LNdWB_fixed_Asm_16:
8043   case ARM::VST3LNdWB_fixed_Asm_32:
8044   case ARM::VST3LNqWB_fixed_Asm_16:
8045   case ARM::VST3LNqWB_fixed_Asm_32: {
8046     MCInst TmpInst;
8047     // Shuffle the operands around so the lane index operand is in the
8048     // right place.
8049     unsigned Spacing;
8050     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8051     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8052     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8053     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8054     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8055     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8056     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8057                                             Spacing));
8058     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8059                                             Spacing * 2));
8060     TmpInst.addOperand(Inst.getOperand(1)); // lane
8061     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8062     TmpInst.addOperand(Inst.getOperand(5));
8063     Inst = TmpInst;
8064     return true;
8065   }
8066 
8067   case ARM::VST4LNdWB_fixed_Asm_8:
8068   case ARM::VST4LNdWB_fixed_Asm_16:
8069   case ARM::VST4LNdWB_fixed_Asm_32:
8070   case ARM::VST4LNqWB_fixed_Asm_16:
8071   case ARM::VST4LNqWB_fixed_Asm_32: {
8072     MCInst TmpInst;
8073     // Shuffle the operands around so the lane index operand is in the
8074     // right place.
8075     unsigned Spacing;
8076     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8077     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8078     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8079     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8080     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8081     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8082     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8083                                             Spacing));
8084     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8085                                             Spacing * 2));
8086     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8087                                             Spacing * 3));
8088     TmpInst.addOperand(Inst.getOperand(1)); // lane
8089     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8090     TmpInst.addOperand(Inst.getOperand(5));
8091     Inst = TmpInst;
8092     return true;
8093   }
8094 
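  // Plain (no-writeback) single-lane stores, e.g. "vst1.8 {d0[2]}, [r0]".
  // For the vst2/vst3/vst4 forms the Asm pseudo carries only the first D
  // register; the rest of the list is reconstructed from it using Spacing.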
8095   case ARM::VST1LNdAsm_8:
8096   case ARM::VST1LNdAsm_16:
8097   case ARM::VST1LNdAsm_32: {
8098     MCInst TmpInst;
8099     // Shuffle the operands around so the lane index operand is in the
8100     // right place.
8101     unsigned Spacing;
8102     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8103     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8104     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8105     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8106     TmpInst.addOperand(Inst.getOperand(1)); // lane
8107     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8108     TmpInst.addOperand(Inst.getOperand(5));
8109     Inst = TmpInst;
8110     return true;
8111   }
8112 
8113   case ARM::VST2LNdAsm_8:
8114   case ARM::VST2LNdAsm_16:
8115   case ARM::VST2LNdAsm_32:
8116   case ARM::VST2LNqAsm_16:
8117   case ARM::VST2LNqAsm_32: {
8118     MCInst TmpInst;
8119     // Shuffle the operands around so the lane index operand is in the
8120     // right place.
8121     unsigned Spacing;
8122     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8123     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8124     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8125     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8126     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8127                                             Spacing));
8128     TmpInst.addOperand(Inst.getOperand(1)); // lane
8129     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8130     TmpInst.addOperand(Inst.getOperand(5));
8131     Inst = TmpInst;
8132     return true;
8133   }
8134 
8135   case ARM::VST3LNdAsm_8:
8136   case ARM::VST3LNdAsm_16:
8137   case ARM::VST3LNdAsm_32:
8138   case ARM::VST3LNqAsm_16:
8139   case ARM::VST3LNqAsm_32: {
8140     MCInst TmpInst;
8141     // Shuffle the operands around so the lane index operand is in the
8142     // right place.
8143     unsigned Spacing;
8144     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8145     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8146     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8147     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8148     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8149                                             Spacing));
8150     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8151                                             Spacing * 2));
8152     TmpInst.addOperand(Inst.getOperand(1)); // lane
8153     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8154     TmpInst.addOperand(Inst.getOperand(5));
8155     Inst = TmpInst;
8156     return true;
8157   }
8158 
8159   case ARM::VST4LNdAsm_8:
8160   case ARM::VST4LNdAsm_16:
8161   case ARM::VST4LNdAsm_32:
8162   case ARM::VST4LNqAsm_16:
8163   case ARM::VST4LNqAsm_32: {
8164     MCInst TmpInst;
8165     // Shuffle the operands around so the lane index operand is in the
8166     // right place.
8167     unsigned Spacing;
8168     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8169     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8170     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8171     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8172     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8173                                             Spacing));
8174     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8175                                             Spacing * 2));
8176     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8177                                             Spacing * 3));
8178     TmpInst.addOperand(Inst.getOperand(1)); // lane
8179     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8180     TmpInst.addOperand(Inst.getOperand(5));
8181     Inst = TmpInst;
8182     return true;
8183   }
8184 
8185   // Handle NEON VLD complex aliases.
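  // Register-writeback single-lane loads, e.g. "vld1.8 {d0[2]}, [r0], r1".
  // The destination registers are also added as tied sources, since a lane
  // load only overwrites one lane of each register.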
8186   case ARM::VLD1LNdWB_register_Asm_8:
8187   case ARM::VLD1LNdWB_register_Asm_16:
8188   case ARM::VLD1LNdWB_register_Asm_32: {
8189     MCInst TmpInst;
8190     // Shuffle the operands around so the lane index operand is in the
8191     // right place.
8192     unsigned Spacing;
8193     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8194     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8195     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8196     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8197     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8198     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8199     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8200     TmpInst.addOperand(Inst.getOperand(1)); // lane
8201     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8202     TmpInst.addOperand(Inst.getOperand(6));
8203     Inst = TmpInst;
8204     return true;
8205   }
8206 
8207   case ARM::VLD2LNdWB_register_Asm_8:
8208   case ARM::VLD2LNdWB_register_Asm_16:
8209   case ARM::VLD2LNdWB_register_Asm_32:
8210   case ARM::VLD2LNqWB_register_Asm_16:
8211   case ARM::VLD2LNqWB_register_Asm_32: {
8212     MCInst TmpInst;
8213     // Shuffle the operands around so the lane index operand is in the
8214     // right place.
8215     unsigned Spacing;
8216     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8217     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8218     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8219                                             Spacing));
8220     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8221     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8222     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8223     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8224     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8225     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8226                                             Spacing));
8227     TmpInst.addOperand(Inst.getOperand(1)); // lane
8228     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8229     TmpInst.addOperand(Inst.getOperand(6));
8230     Inst = TmpInst;
8231     return true;
8232   }
8233 
8234   case ARM::VLD3LNdWB_register_Asm_8:
8235   case ARM::VLD3LNdWB_register_Asm_16:
8236   case ARM::VLD3LNdWB_register_Asm_32:
8237   case ARM::VLD3LNqWB_register_Asm_16:
8238   case ARM::VLD3LNqWB_register_Asm_32: {
8239     MCInst TmpInst;
8240     // Shuffle the operands around so the lane index operand is in the
8241     // right place.
8242     unsigned Spacing;
8243     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8244     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8245     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8246                                             Spacing));
8247     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8248                                             Spacing * 2));
8249     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8250     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8251     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8252     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8253     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8254     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8255                                             Spacing));
8256     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8257                                             Spacing * 2));
8258     TmpInst.addOperand(Inst.getOperand(1)); // lane
8259     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8260     TmpInst.addOperand(Inst.getOperand(6));
8261     Inst = TmpInst;
8262     return true;
8263   }
8264 
8265   case ARM::VLD4LNdWB_register_Asm_8:
8266   case ARM::VLD4LNdWB_register_Asm_16:
8267   case ARM::VLD4LNdWB_register_Asm_32:
8268   case ARM::VLD4LNqWB_register_Asm_16:
8269   case ARM::VLD4LNqWB_register_Asm_32: {
8270     MCInst TmpInst;
8271     // Shuffle the operands around so the lane index operand is in the
8272     // right place.
8273     unsigned Spacing;
8274     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8275     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8276     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8277                                             Spacing));
8278     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8279                                             Spacing * 2));
8280     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8281                                             Spacing * 3));
8282     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8283     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8284     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8285     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8286     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8287     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8288                                             Spacing));
8289     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8290                                             Spacing * 2));
8291     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8292                                             Spacing * 3));
8293     TmpInst.addOperand(Inst.getOperand(1)); // lane
8294     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8295     TmpInst.addOperand(Inst.getOperand(6));
8296     Inst = TmpInst;
8297     return true;
8298   }
8299 
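  // Fixed-writeback single-lane loads, e.g. "vld1.8 {d0[2]}, [r0]!"; as with
  // the stores above, Rm is encoded as register 0 for the fixed form.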
8300   case ARM::VLD1LNdWB_fixed_Asm_8:
8301   case ARM::VLD1LNdWB_fixed_Asm_16:
8302   case ARM::VLD1LNdWB_fixed_Asm_32: {
8303     MCInst TmpInst;
8304     // Shuffle the operands around so the lane index operand is in the
8305     // right place.
8306     unsigned Spacing;
8307     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8308     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8309     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8310     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8311     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8312     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8313     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8314     TmpInst.addOperand(Inst.getOperand(1)); // lane
8315     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8316     TmpInst.addOperand(Inst.getOperand(5));
8317     Inst = TmpInst;
8318     return true;
8319   }
8320 
8321   case ARM::VLD2LNdWB_fixed_Asm_8:
8322   case ARM::VLD2LNdWB_fixed_Asm_16:
8323   case ARM::VLD2LNdWB_fixed_Asm_32:
8324   case ARM::VLD2LNqWB_fixed_Asm_16:
8325   case ARM::VLD2LNqWB_fixed_Asm_32: {
8326     MCInst TmpInst;
8327     // Shuffle the operands around so the lane index operand is in the
8328     // right place.
8329     unsigned Spacing;
8330     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8331     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8332     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8333                                             Spacing));
8334     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8335     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8336     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8337     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8338     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8339     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8340                                             Spacing));
8341     TmpInst.addOperand(Inst.getOperand(1)); // lane
8342     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8343     TmpInst.addOperand(Inst.getOperand(5));
8344     Inst = TmpInst;
8345     return true;
8346   }
8347 
8348   case ARM::VLD3LNdWB_fixed_Asm_8:
8349   case ARM::VLD3LNdWB_fixed_Asm_16:
8350   case ARM::VLD3LNdWB_fixed_Asm_32:
8351   case ARM::VLD3LNqWB_fixed_Asm_16:
8352   case ARM::VLD3LNqWB_fixed_Asm_32: {
8353     MCInst TmpInst;
8354     // Shuffle the operands around so the lane index operand is in the
8355     // right place.
8356     unsigned Spacing;
8357     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8358     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8359     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8360                                             Spacing));
8361     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8362                                             Spacing * 2));
8363     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8364     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8365     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8366     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8367     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8368     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8369                                             Spacing));
8370     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8371                                             Spacing * 2));
8372     TmpInst.addOperand(Inst.getOperand(1)); // lane
8373     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8374     TmpInst.addOperand(Inst.getOperand(5));
8375     Inst = TmpInst;
8376     return true;
8377   }
8378 
8379   case ARM::VLD4LNdWB_fixed_Asm_8:
8380   case ARM::VLD4LNdWB_fixed_Asm_16:
8381   case ARM::VLD4LNdWB_fixed_Asm_32:
8382   case ARM::VLD4LNqWB_fixed_Asm_16:
8383   case ARM::VLD4LNqWB_fixed_Asm_32: {
8384     MCInst TmpInst;
8385     // Shuffle the operands around so the lane index operand is in the
8386     // right place.
8387     unsigned Spacing;
8388     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8389     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8390     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8391                                             Spacing));
8392     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8393                                             Spacing * 2));
8394     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8395                                             Spacing * 3));
8396     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8397     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8398     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8399     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8400     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8401     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8402                                             Spacing));
8403     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8404                                             Spacing * 2));
8405     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8406                                             Spacing * 3));
8407     TmpInst.addOperand(Inst.getOperand(1)); // lane
8408     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8409     TmpInst.addOperand(Inst.getOperand(5));
8410     Inst = TmpInst;
8411     return true;
8412   }
8413 
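  // Plain (no-writeback) single-lane loads, e.g. "vld1.8 {d0[2]}, [r0]".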
8414   case ARM::VLD1LNdAsm_8:
8415   case ARM::VLD1LNdAsm_16:
8416   case ARM::VLD1LNdAsm_32: {
8417     MCInst TmpInst;
8418     // Shuffle the operands around so the lane index operand is in the
8419     // right place.
8420     unsigned Spacing;
8421     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8422     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8423     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8424     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8425     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8426     TmpInst.addOperand(Inst.getOperand(1)); // lane
8427     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8428     TmpInst.addOperand(Inst.getOperand(5));
8429     Inst = TmpInst;
8430     return true;
8431   }
8432 
8433   case ARM::VLD2LNdAsm_8:
8434   case ARM::VLD2LNdAsm_16:
8435   case ARM::VLD2LNdAsm_32:
8436   case ARM::VLD2LNqAsm_16:
8437   case ARM::VLD2LNqAsm_32: {
8438     MCInst TmpInst;
8439     // Shuffle the operands around so the lane index operand is in the
8440     // right place.
8441     unsigned Spacing;
8442     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8443     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8444     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8445                                             Spacing));
8446     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8447     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8448     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8449     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8450                                             Spacing));
8451     TmpInst.addOperand(Inst.getOperand(1)); // lane
8452     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8453     TmpInst.addOperand(Inst.getOperand(5));
8454     Inst = TmpInst;
8455     return true;
8456   }
8457 
8458   case ARM::VLD3LNdAsm_8:
8459   case ARM::VLD3LNdAsm_16:
8460   case ARM::VLD3LNdAsm_32:
8461   case ARM::VLD3LNqAsm_16:
8462   case ARM::VLD3LNqAsm_32: {
8463     MCInst TmpInst;
8464     // Shuffle the operands around so the lane index operand is in the
8465     // right place.
8466     unsigned Spacing;
8467     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8468     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8469     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8470                                             Spacing));
8471     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8472                                             Spacing * 2));
8473     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8474     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8475     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8476     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8477                                             Spacing));
8478     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8479                                             Spacing * 2));
8480     TmpInst.addOperand(Inst.getOperand(1)); // lane
8481     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8482     TmpInst.addOperand(Inst.getOperand(5));
8483     Inst = TmpInst;
8484     return true;
8485   }
8486 
8487   case ARM::VLD4LNdAsm_8:
8488   case ARM::VLD4LNdAsm_16:
8489   case ARM::VLD4LNdAsm_32:
8490   case ARM::VLD4LNqAsm_16:
8491   case ARM::VLD4LNqAsm_32: {
8492     MCInst TmpInst;
8493     // Shuffle the operands around so the lane index operand is in the
8494     // right place.
8495     unsigned Spacing;
8496     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8497     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8498     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8499                                             Spacing));
8500     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8501                                             Spacing * 2));
8502     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8503                                             Spacing * 3));
8504     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8505     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8506     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8507     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8508                                             Spacing));
8509     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8510                                             Spacing * 2));
8511     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8512                                             Spacing * 3));
8513     TmpInst.addOperand(Inst.getOperand(1)); // lane
8514     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8515     TmpInst.addOperand(Inst.getOperand(5));
8516     Inst = TmpInst;
8517     return true;
8518   }
8519 
8520   // VLD3DUP single 3-element structure to all lanes instructions.
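  // e.g. "vld3.8 {d0[], d1[], d2[]}, [r0]"; the double-spaced aliases
  // (Spacing == 2) expand to "{d0[], d2[], d4[]}".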
8521   case ARM::VLD3DUPdAsm_8:
8522   case ARM::VLD3DUPdAsm_16:
8523   case ARM::VLD3DUPdAsm_32:
8524   case ARM::VLD3DUPqAsm_8:
8525   case ARM::VLD3DUPqAsm_16:
8526   case ARM::VLD3DUPqAsm_32: {
8527     MCInst TmpInst;
8528     unsigned Spacing;
8529     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8530     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8531     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8532                                             Spacing));
8533     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8534                                             Spacing * 2));
8535     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8536     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8537     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8538     TmpInst.addOperand(Inst.getOperand(4));
8539     Inst = TmpInst;
8540     return true;
8541   }
8542 
8543   case ARM::VLD3DUPdWB_fixed_Asm_8:
8544   case ARM::VLD3DUPdWB_fixed_Asm_16:
8545   case ARM::VLD3DUPdWB_fixed_Asm_32:
8546   case ARM::VLD3DUPqWB_fixed_Asm_8:
8547   case ARM::VLD3DUPqWB_fixed_Asm_16:
8548   case ARM::VLD3DUPqWB_fixed_Asm_32: {
8549     MCInst TmpInst;
8550     unsigned Spacing;
8551     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8552     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8553     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8554                                             Spacing));
8555     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8556                                             Spacing * 2));
8557     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8558     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8559     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8560     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8561     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8562     TmpInst.addOperand(Inst.getOperand(4));
8563     Inst = TmpInst;
8564     return true;
8565   }
8566 
8567   case ARM::VLD3DUPdWB_register_Asm_8:
8568   case ARM::VLD3DUPdWB_register_Asm_16:
8569   case ARM::VLD3DUPdWB_register_Asm_32:
8570   case ARM::VLD3DUPqWB_register_Asm_8:
8571   case ARM::VLD3DUPqWB_register_Asm_16:
8572   case ARM::VLD3DUPqWB_register_Asm_32: {
8573     MCInst TmpInst;
8574     unsigned Spacing;
8575     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8576     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8577     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8578                                             Spacing));
8579     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8580                                             Spacing * 2));
8581     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8582     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8583     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8584     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8585     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8586     TmpInst.addOperand(Inst.getOperand(5));
8587     Inst = TmpInst;
8588     return true;
8589   }
8590 
8591   // VLD3 multiple 3-element structure instructions.
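  // e.g. "vld3.8 {d0, d1, d2}, [r0]"; the double-spaced aliases expand to
  // "{d0, d2, d4}" via Spacing == 2.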
8592   case ARM::VLD3dAsm_8:
8593   case ARM::VLD3dAsm_16:
8594   case ARM::VLD3dAsm_32:
8595   case ARM::VLD3qAsm_8:
8596   case ARM::VLD3qAsm_16:
8597   case ARM::VLD3qAsm_32: {
8598     MCInst TmpInst;
8599     unsigned Spacing;
8600     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8601     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8602     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8603                                             Spacing));
8604     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8605                                             Spacing * 2));
8606     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8607     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8608     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8609     TmpInst.addOperand(Inst.getOperand(4));
8610     Inst = TmpInst;
8611     return true;
8612   }
8613 
8614   case ARM::VLD3dWB_fixed_Asm_8:
8615   case ARM::VLD3dWB_fixed_Asm_16:
8616   case ARM::VLD3dWB_fixed_Asm_32:
8617   case ARM::VLD3qWB_fixed_Asm_8:
8618   case ARM::VLD3qWB_fixed_Asm_16:
8619   case ARM::VLD3qWB_fixed_Asm_32: {
8620     MCInst TmpInst;
8621     unsigned Spacing;
8622     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8623     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8624     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8625                                             Spacing));
8626     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8627                                             Spacing * 2));
8628     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8629     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8630     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8631     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8632     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8633     TmpInst.addOperand(Inst.getOperand(4));
8634     Inst = TmpInst;
8635     return true;
8636   }
8637 
8638   case ARM::VLD3dWB_register_Asm_8:
8639   case ARM::VLD3dWB_register_Asm_16:
8640   case ARM::VLD3dWB_register_Asm_32:
8641   case ARM::VLD3qWB_register_Asm_8:
8642   case ARM::VLD3qWB_register_Asm_16:
8643   case ARM::VLD3qWB_register_Asm_32: {
8644     MCInst TmpInst;
8645     unsigned Spacing;
8646     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8647     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8648     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8649                                             Spacing));
8650     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8651                                             Spacing * 2));
8652     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8653     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8654     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8655     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8656     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8657     TmpInst.addOperand(Inst.getOperand(5));
8658     Inst = TmpInst;
8659     return true;
8660   }
8661 
8662   // VLD4DUP single 4-element structure to all lanes instructions.
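  // e.g. "vld4.8 {d0[], d1[], d2[], d3[]}, [r0]".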
8663   case ARM::VLD4DUPdAsm_8:
8664   case ARM::VLD4DUPdAsm_16:
8665   case ARM::VLD4DUPdAsm_32:
8666   case ARM::VLD4DUPqAsm_8:
8667   case ARM::VLD4DUPqAsm_16:
8668   case ARM::VLD4DUPqAsm_32: {
8669     MCInst TmpInst;
8670     unsigned Spacing;
8671     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8672     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8673     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8674                                             Spacing));
8675     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8676                                             Spacing * 2));
8677     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8678                                             Spacing * 3));
8679     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8680     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8681     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8682     TmpInst.addOperand(Inst.getOperand(4));
8683     Inst = TmpInst;
8684     return true;
8685   }
8686 
8687   case ARM::VLD4DUPdWB_fixed_Asm_8:
8688   case ARM::VLD4DUPdWB_fixed_Asm_16:
8689   case ARM::VLD4DUPdWB_fixed_Asm_32:
8690   case ARM::VLD4DUPqWB_fixed_Asm_8:
8691   case ARM::VLD4DUPqWB_fixed_Asm_16:
8692   case ARM::VLD4DUPqWB_fixed_Asm_32: {
8693     MCInst TmpInst;
8694     unsigned Spacing;
8695     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8696     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8697     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8698                                             Spacing));
8699     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8700                                             Spacing * 2));
8701     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8702                                             Spacing * 3));
8703     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8704     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8705     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8706     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8707     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8708     TmpInst.addOperand(Inst.getOperand(4));
8709     Inst = TmpInst;
8710     return true;
8711   }
8712 
8713   case ARM::VLD4DUPdWB_register_Asm_8:
8714   case ARM::VLD4DUPdWB_register_Asm_16:
8715   case ARM::VLD4DUPdWB_register_Asm_32:
8716   case ARM::VLD4DUPqWB_register_Asm_8:
8717   case ARM::VLD4DUPqWB_register_Asm_16:
8718   case ARM::VLD4DUPqWB_register_Asm_32: {
8719     MCInst TmpInst;
8720     unsigned Spacing;
8721     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8722     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8723     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8724                                             Spacing));
8725     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8726                                             Spacing * 2));
8727     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8728                                             Spacing * 3));
8729     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8730     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8731     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8732     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8733     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8734     TmpInst.addOperand(Inst.getOperand(5));
8735     Inst = TmpInst;
8736     return true;
8737   }
8738 
8739   // VLD4 multiple 4-element structure instructions.
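  // e.g. "vld4.8 {d0, d1, d2, d3}, [r0]" (or "{d0, d2, d4, d6}" for the
  // double-spaced aliases).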
8740   case ARM::VLD4dAsm_8:
8741   case ARM::VLD4dAsm_16:
8742   case ARM::VLD4dAsm_32:
8743   case ARM::VLD4qAsm_8:
8744   case ARM::VLD4qAsm_16:
8745   case ARM::VLD4qAsm_32: {
8746     MCInst TmpInst;
8747     unsigned Spacing;
8748     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8749     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8750     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8751                                             Spacing));
8752     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8753                                             Spacing * 2));
8754     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8755                                             Spacing * 3));
8756     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8757     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8758     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8759     TmpInst.addOperand(Inst.getOperand(4));
8760     Inst = TmpInst;
8761     return true;
8762   }
8763 
8764   case ARM::VLD4dWB_fixed_Asm_8:
8765   case ARM::VLD4dWB_fixed_Asm_16:
8766   case ARM::VLD4dWB_fixed_Asm_32:
8767   case ARM::VLD4qWB_fixed_Asm_8:
8768   case ARM::VLD4qWB_fixed_Asm_16:
8769   case ARM::VLD4qWB_fixed_Asm_32: {
8770     MCInst TmpInst;
8771     unsigned Spacing;
8772     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8773     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8774     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8775                                             Spacing));
8776     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8777                                             Spacing * 2));
8778     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8779                                             Spacing * 3));
8780     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8781     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8782     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8783     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8784     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8785     TmpInst.addOperand(Inst.getOperand(4));
8786     Inst = TmpInst;
8787     return true;
8788   }
8789 
8790   case ARM::VLD4dWB_register_Asm_8:
8791   case ARM::VLD4dWB_register_Asm_16:
8792   case ARM::VLD4dWB_register_Asm_32:
8793   case ARM::VLD4qWB_register_Asm_8:
8794   case ARM::VLD4qWB_register_Asm_16:
8795   case ARM::VLD4qWB_register_Asm_32: {
8796     MCInst TmpInst;
8797     unsigned Spacing;
8798     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8799     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8800     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8801                                             Spacing));
8802     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8803                                             Spacing * 2));
8804     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8805                                             Spacing * 3));
8806     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8807     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8808     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8809     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8810     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8811     TmpInst.addOperand(Inst.getOperand(5));
8812     Inst = TmpInst;
8813     return true;
8814   }
8815 
8816   // VST3 multiple 3-element structure instructions.
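  // e.g. "vst3.8 {d0, d1, d2}, [r0]".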
8817   case ARM::VST3dAsm_8:
8818   case ARM::VST3dAsm_16:
8819   case ARM::VST3dAsm_32:
8820   case ARM::VST3qAsm_8:
8821   case ARM::VST3qAsm_16:
8822   case ARM::VST3qAsm_32: {
8823     MCInst TmpInst;
8824     unsigned Spacing;
8825     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8826     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8827     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8828     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8829     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8830                                             Spacing));
8831     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8832                                             Spacing * 2));
8833     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8834     TmpInst.addOperand(Inst.getOperand(4));
8835     Inst = TmpInst;
8836     return true;
8837   }
8838 
8839   case ARM::VST3dWB_fixed_Asm_8:
8840   case ARM::VST3dWB_fixed_Asm_16:
8841   case ARM::VST3dWB_fixed_Asm_32:
8842   case ARM::VST3qWB_fixed_Asm_8:
8843   case ARM::VST3qWB_fixed_Asm_16:
8844   case ARM::VST3qWB_fixed_Asm_32: {
8845     MCInst TmpInst;
8846     unsigned Spacing;
8847     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8848     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8849     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8850     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8851     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8852     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8853     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8854                                             Spacing));
8855     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8856                                             Spacing * 2));
8857     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8858     TmpInst.addOperand(Inst.getOperand(4));
8859     Inst = TmpInst;
8860     return true;
8861   }
8862 
8863   case ARM::VST3dWB_register_Asm_8:
8864   case ARM::VST3dWB_register_Asm_16:
8865   case ARM::VST3dWB_register_Asm_32:
8866   case ARM::VST3qWB_register_Asm_8:
8867   case ARM::VST3qWB_register_Asm_16:
8868   case ARM::VST3qWB_register_Asm_32: {
8869     MCInst TmpInst;
8870     unsigned Spacing;
8871     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8872     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8873     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8874     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8875     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8876     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8877     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8878                                             Spacing));
8879     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8880                                             Spacing * 2));
8881     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8882     TmpInst.addOperand(Inst.getOperand(5));
8883     Inst = TmpInst;
8884     return true;
8885   }
8886 
8887   // VST4 multiple 4-element structure instructions.
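  // e.g. "vst4.8 {d0, d1, d2, d3}, [r0]".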
8888   case ARM::VST4dAsm_8:
8889   case ARM::VST4dAsm_16:
8890   case ARM::VST4dAsm_32:
8891   case ARM::VST4qAsm_8:
8892   case ARM::VST4qAsm_16:
8893   case ARM::VST4qAsm_32: {
8894     MCInst TmpInst;
8895     unsigned Spacing;
8896     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8897     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8898     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8899     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8900     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8901                                             Spacing));
8902     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8903                                             Spacing * 2));
8904     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8905                                             Spacing * 3));
8906     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8907     TmpInst.addOperand(Inst.getOperand(4));
8908     Inst = TmpInst;
8909     return true;
8910   }
8911 
8912   case ARM::VST4dWB_fixed_Asm_8:
8913   case ARM::VST4dWB_fixed_Asm_16:
8914   case ARM::VST4dWB_fixed_Asm_32:
8915   case ARM::VST4qWB_fixed_Asm_8:
8916   case ARM::VST4qWB_fixed_Asm_16:
8917   case ARM::VST4qWB_fixed_Asm_32: {
8918     MCInst TmpInst;
8919     unsigned Spacing;
8920     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8921     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8922     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8923     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8924     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8925     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8926     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8927                                             Spacing));
8928     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8929                                             Spacing * 2));
8930     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8931                                             Spacing * 3));
8932     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8933     TmpInst.addOperand(Inst.getOperand(4));
8934     Inst = TmpInst;
8935     return true;
8936   }
8937 
8938   case ARM::VST4dWB_register_Asm_8:
8939   case ARM::VST4dWB_register_Asm_16:
8940   case ARM::VST4dWB_register_Asm_32:
8941   case ARM::VST4qWB_register_Asm_8:
8942   case ARM::VST4qWB_register_Asm_16:
8943   case ARM::VST4qWB_register_Asm_32: {
8944     MCInst TmpInst;
8945     unsigned Spacing;
8946     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8947     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8948     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8949     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8950     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8951     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8952     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8953                                             Spacing));
8954     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8955                                             Spacing * 2));
8956     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8957                                             Spacing * 3));
8958     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8959     TmpInst.addOperand(Inst.getOperand(5));
8960     Inst = TmpInst;
8961     return true;
8962   }
8963 
8964   // Handle encoding choice for the shift-immediate instructions.
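  // Prefer the 16-bit Thumb encoding when both registers are low and the
  // flag-setting behaviour matches the IT context, e.g. "lsls r0, r1, #2"
  // outside an IT block is narrowed from t2LSLri to tLSLri.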
8965   case ARM::t2LSLri:
8966   case ARM::t2LSRri:
8967   case ARM::t2ASRri:
8968     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8969         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8970         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8971         !HasWideQualifier) {
8972       unsigned NewOpc;
8973       switch (Inst.getOpcode()) {
8974       default: llvm_unreachable("unexpected opcode");
8975       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
8976       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
8977       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
8978       }
8979       // The Thumb1 operands aren't in the same order. Awesome, eh?
8980       MCInst TmpInst;
8981       TmpInst.setOpcode(NewOpc);
8982       TmpInst.addOperand(Inst.getOperand(0));
8983       TmpInst.addOperand(Inst.getOperand(5));
8984       TmpInst.addOperand(Inst.getOperand(1));
8985       TmpInst.addOperand(Inst.getOperand(2));
8986       TmpInst.addOperand(Inst.getOperand(3));
8987       TmpInst.addOperand(Inst.getOperand(4));
8988       Inst = TmpInst;
8989       return true;
8990     }
8991     return false;
8992 
8993   // Handle the Thumb2 mode MOV complex aliases.
8994   case ARM::t2MOVsr:
8995   case ARM::t2MOVSsr: {
8996     // Which instruction to expand to depends on the CCOut operand, on
8997     // whether we're in an IT block, and on whether the register operands
8998     // are low registers.
8999     bool isNarrow = false;
9000     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9001         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9002         isARMLowRegister(Inst.getOperand(2).getReg()) &&
9003         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
9004         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
9005         !HasWideQualifier)
9006       isNarrow = true;
9007     MCInst TmpInst;
9008     unsigned newOpc;
9009     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
9010     default: llvm_unreachable("unexpected opcode!");
9011     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
9012     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
9013     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
9014     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
9015     }
9016     TmpInst.setOpcode(newOpc);
9017     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9018     if (isNarrow)
9019       TmpInst.addOperand(MCOperand::createReg(
9020           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
9021     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9022     TmpInst.addOperand(Inst.getOperand(2)); // Rm
9023     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
9024     TmpInst.addOperand(Inst.getOperand(5));
9025     if (!isNarrow)
9026       TmpInst.addOperand(MCOperand::createReg(
9027           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
9028     Inst = TmpInst;
9029     return true;
9030   }
9031   case ARM::t2MOVsi:
9032   case ARM::t2MOVSsi: {
9033     // Which instruction to expand to depends on the CCOut operand, on
9034     // whether we're in an IT block, and on whether the register operands
9035     // are low registers.
9036     bool isNarrow = false;
9037     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9038         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9039         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
9040         !HasWideQualifier)
9041       isNarrow = true;
9042     MCInst TmpInst;
9043     unsigned newOpc;
9044     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
9045     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
9046     bool isMov = false;
9047     // MOV rd, rm, LSL #0 is actually a MOV instruction
9048     if (Shift == ARM_AM::lsl && Amount == 0) {
9049       isMov = true;
9050       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
9051       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
9052       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
9053       // instead.
9054       if (inITBlock()) {
9055         isNarrow = false;
9056       }
9057       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
9058     } else {
9059       switch(Shift) {
9060       default: llvm_unreachable("unexpected opcode!");
9061       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
9062       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
9063       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
9064       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
9065       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
9066       }
9067     }
9068     if (Amount == 32) Amount = 0;
9069     TmpInst.setOpcode(newOpc);
9070     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9071     if (isNarrow && !isMov)
9072       TmpInst.addOperand(MCOperand::createReg(
9073           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
9074     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9075     if (newOpc != ARM::t2RRX && !isMov)
9076       TmpInst.addOperand(MCOperand::createImm(Amount));
9077     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9078     TmpInst.addOperand(Inst.getOperand(4));
9079     if (!isNarrow)
9080       TmpInst.addOperand(MCOperand::createReg(
9081           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
9082     Inst = TmpInst;
9083     return true;
9084   }
9085   // Handle the ARM mode MOV complex aliases.
9086   case ARM::ASRr:
9087   case ARM::LSRr:
9088   case ARM::LSLr:
9089   case ARM::RORr: {
9090     ARM_AM::ShiftOpc ShiftTy;
9091     switch(Inst.getOpcode()) {
9092     default: llvm_unreachable("unexpected opcode!");
9093     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
9094     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
9095     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
9096     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
9097     }
9098     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
9099     MCInst TmpInst;
9100     TmpInst.setOpcode(ARM::MOVsr);
9101     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9102     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9103     TmpInst.addOperand(Inst.getOperand(2)); // Rm
9104     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9105     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9106     TmpInst.addOperand(Inst.getOperand(4));
9107     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
9108     Inst = TmpInst;
9109     return true;
9110   }
9111   case ARM::ASRi:
9112   case ARM::LSRi:
9113   case ARM::LSLi:
9114   case ARM::RORi: {
9115     ARM_AM::ShiftOpc ShiftTy;
9116     switch(Inst.getOpcode()) {
9117     default: llvm_unreachable("unexpected opcode!");
9118     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
9119     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
9120     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
9121     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
9122     }
9123     // A shift by zero is a plain MOVr, not a MOVsi.
9124     unsigned Amt = Inst.getOperand(2).getImm();
9125     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
9126     // A shift by 32 should be encoded as 0 when permitted
9127     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
9128       Amt = 0;
9129     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
9130     MCInst TmpInst;
9131     TmpInst.setOpcode(Opc);
9132     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9133     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9134     if (Opc == ARM::MOVsi)
9135       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9136     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9137     TmpInst.addOperand(Inst.getOperand(4));
9138     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
9139     Inst = TmpInst;
9140     return true;
9141   }
9142   case ARM::RRXi: {
9143     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
9144     MCInst TmpInst;
9145     TmpInst.setOpcode(ARM::MOVsi);
9146     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9147     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9148     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9149     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9150     TmpInst.addOperand(Inst.getOperand(3));
9151     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
9152     Inst = TmpInst;
9153     return true;
9154   }
9155   case ARM::t2LDMIA_UPD: {
9156     // If this is a load of a single register, then we should use
9157     // a post-indexed LDR instruction instead, per the ARM ARM.
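    // e.g. "ldmia r0!, {r1}" is equivalent to "ldr r1, [r0], #4".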
9158     if (Inst.getNumOperands() != 5)
9159       return false;
9160     MCInst TmpInst;
9161     TmpInst.setOpcode(ARM::t2LDR_POST);
9162     TmpInst.addOperand(Inst.getOperand(4)); // Rt
9163     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9164     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9165     TmpInst.addOperand(MCOperand::createImm(4));
9166     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9167     TmpInst.addOperand(Inst.getOperand(3));
9168     Inst = TmpInst;
9169     return true;
9170   }
9171   case ARM::t2STMDB_UPD: {
9172     // If this is a store of a single register, then we should use
9173     // a pre-indexed STR instruction instead, per the ARM ARM.
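    // e.g. "stmdb r0!, {r1}" is equivalent to "str r1, [r0, #-4]!".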
9174     if (Inst.getNumOperands() != 5)
9175       return false;
9176     MCInst TmpInst;
9177     TmpInst.setOpcode(ARM::t2STR_PRE);
9178     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9179     TmpInst.addOperand(Inst.getOperand(4)); // Rt
9180     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9181     TmpInst.addOperand(MCOperand::createImm(-4));
9182     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9183     TmpInst.addOperand(Inst.getOperand(3));
9184     Inst = TmpInst;
9185     return true;
9186   }
9187   case ARM::LDMIA_UPD:
9188     // If this is a load of a single register via a 'pop', then we should use
9189     // a post-indexed LDR instruction instead, per the ARM ARM.
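    // e.g. "pop {r0}" becomes "ldr r0, [sp], #4".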
9190     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
9191         Inst.getNumOperands() == 5) {
9192       MCInst TmpInst;
9193       TmpInst.setOpcode(ARM::LDR_POST_IMM);
9194       TmpInst.addOperand(Inst.getOperand(4)); // Rt
9195       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9196       TmpInst.addOperand(Inst.getOperand(1)); // Rn
9197       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
9198       TmpInst.addOperand(MCOperand::createImm(4));
9199       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9200       TmpInst.addOperand(Inst.getOperand(3));
9201       Inst = TmpInst;
9202       return true;
9203     }
9204     break;
9205   case ARM::STMDB_UPD:
9206     // If this is a store of a single register via a 'push', then we should use
9207     // a pre-indexed STR instruction instead, per the ARM ARM.
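    // e.g. "push {r0}" becomes "str r0, [sp, #-4]!".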
9208     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
9209         Inst.getNumOperands() == 5) {
9210       MCInst TmpInst;
9211       TmpInst.setOpcode(ARM::STR_PRE_IMM);
9212       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9213       TmpInst.addOperand(Inst.getOperand(4)); // Rt
9214       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
9215       TmpInst.addOperand(MCOperand::createImm(-4));
9216       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9217       TmpInst.addOperand(Inst.getOperand(3));
9218       Inst = TmpInst;
9219     }
9220     break;
9221   case ARM::t2ADDri12:
9222     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
9223     // mnemonic was used (not "addw"), encoding T3 is preferred.
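    // e.g. "add r0, r1, #10" is assembled as t2ADDri, while "addw r0, r1, #10"
    // keeps the T4 (t2ADDri12) encoding.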
9224     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
9225         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
9226       break;
9227     Inst.setOpcode(ARM::t2ADDri);
9228     Inst.addOperand(MCOperand::createReg(0)); // cc_out
9229     break;
9230   case ARM::t2SUBri12:
9231     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
9232     // mnemonic was used (not "subw"), encoding T3 is preferred.
9233     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
9234         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
9235       break;
9236     Inst.setOpcode(ARM::t2SUBri);
9237     Inst.addOperand(MCOperand::createReg(0)); // cc_out
9238     break;
9239   case ARM::tADDi8:
9240     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
9241     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
9242     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
9243     // to encoding T1 if <Rd> is omitted."
9244     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
9245       Inst.setOpcode(ARM::tADDi3);
9246       return true;
9247     }
9248     break;
9249   case ARM::tSUBi8:
9250     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
9251     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
9252     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
9253     // to encoding T1 if <Rd> is omitted."
9254     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
9255       Inst.setOpcode(ARM::tSUBi3);
9256       return true;
9257     }
9258     break;
9259   case ARM::t2ADDri:
9260   case ARM::t2SUBri: {
9261     // If the destination and first source operand are the same, and
9262     // the flags are compatible with the current IT status, use encoding T2
9263     // instead of T3. For compatibility with the system 'as'. Make sure the
9264     // wide encoding wasn't explicit.
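    // For example, "adds r1, r1, #200" outside of an IT block narrows to the
    // 16-bit tADDi8 encoding.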
9265     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
9266         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
9267         (Inst.getOperand(2).isImm() &&
9268          (unsigned)Inst.getOperand(2).getImm() > 255) ||
9269         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
9270         HasWideQualifier)
9271       break;
9272     MCInst TmpInst;
9273     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
9274                       ARM::tADDi8 : ARM::tSUBi8);
9275     TmpInst.addOperand(Inst.getOperand(0));
9276     TmpInst.addOperand(Inst.getOperand(5));
9277     TmpInst.addOperand(Inst.getOperand(0));
9278     TmpInst.addOperand(Inst.getOperand(2));
9279     TmpInst.addOperand(Inst.getOperand(3));
9280     TmpInst.addOperand(Inst.getOperand(4));
9281     Inst = TmpInst;
9282     return true;
9283   }
9284   case ARM::t2ADDrr: {
9285     // If the destination and first source operand are the same, and
9286     // there's no setting of the flags, use encoding T2 instead of T3.
9287     // Note that this is only for ADD, not SUB. This mirrors the system
9288     // 'as' behaviour.  Also take advantage of ADD being commutative.
9289     // Make sure the wide encoding wasn't explicit.
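    // For example, both "add r0, r0, r1" and "add r0, r1, r0" narrow to the
    // 16-bit tADDhirr form ("add r0, r1") when not setting flags.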
9290     bool Swap = false;
9291     auto DestReg = Inst.getOperand(0).getReg();
9292     bool Transform = DestReg == Inst.getOperand(1).getReg();
9293     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
9294       Transform = true;
9295       Swap = true;
9296     }
9297     if (!Transform ||
9298         Inst.getOperand(5).getReg() != 0 ||
9299         HasWideQualifier)
9300       break;
9301     MCInst TmpInst;
9302     TmpInst.setOpcode(ARM::tADDhirr);
9303     TmpInst.addOperand(Inst.getOperand(0));
9304     TmpInst.addOperand(Inst.getOperand(0));
9305     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
9306     TmpInst.addOperand(Inst.getOperand(3));
9307     TmpInst.addOperand(Inst.getOperand(4));
9308     Inst = TmpInst;
9309     return true;
9310   }
9311   case ARM::tADDrSP:
9312     // If the non-SP source operand and the destination operand are not the
9313     // same, we need to use the 32-bit encoding if it's available.
9314     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
9315       Inst.setOpcode(ARM::t2ADDrr);
9316       Inst.addOperand(MCOperand::createReg(0)); // cc_out
9317       return true;
9318     }
9319     break;
9320   case ARM::tB:
9321     // A Thumb conditional branch outside of an IT block is a tBcc.
9322     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
9323       Inst.setOpcode(ARM::tBcc);
9324       return true;
9325     }
9326     break;
9327   case ARM::t2B:
9328     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
9330       Inst.setOpcode(ARM::t2Bcc);
9331       return true;
9332     }
9333     break;
9334   case ARM::t2Bcc:
9335     // If the conditional is AL or we're in an IT block, we really want t2B.
9336     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
9337       Inst.setOpcode(ARM::t2B);
9338       return true;
9339     }
9340     break;
9341   case ARM::tBcc:
9342     // If the conditional is AL, we really want tB.
9343     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
9344       Inst.setOpcode(ARM::tB);
9345       return true;
9346     }
9347     break;
9348   case ARM::tLDMIA: {
9349     // If the register list contains any high registers, or if the writeback
9350     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
9351     // instead if we're in Thumb2. Otherwise, this should have generated
9352     // an error in validateInstruction().
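    // For example, "ldmia r0!, {r8, r9}" has high registers in the list, so
    // it must use the 32-bit t2LDMIA_UPD encoding.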
9353     unsigned Rn = Inst.getOperand(0).getReg();
9354     bool hasWritebackToken =
9355         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
9356          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
9357     bool listContainsBase;
9358     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
9359         (!listContainsBase && !hasWritebackToken) ||
9360         (listContainsBase && hasWritebackToken)) {
9361       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
9362       assert(isThumbTwo());
9363       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
9364       // If we're switching to the updating version, we need to insert
9365       // the writeback tied operand.
9366       if (hasWritebackToken)
9367         Inst.insert(Inst.begin(),
9368                     MCOperand::createReg(Inst.getOperand(0).getReg()));
9369       return true;
9370     }
9371     break;
9372   }
9373   case ARM::tSTMIA_UPD: {
9374     // If the register list contains any high registers, we need to use
9375     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
9376     // should have generated an error in validateInstruction().
9377     unsigned Rn = Inst.getOperand(0).getReg();
9378     bool listContainsBase;
9379     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
9380       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
9381       assert(isThumbTwo());
9382       Inst.setOpcode(ARM::t2STMIA_UPD);
9383       return true;
9384     }
9385     break;
9386   }
9387   case ARM::tPOP: {
9388     bool listContainsBase;
9389     // If the register list contains any high registers, we need to use
9390     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
9391     // should have generated an error in validateInstruction().
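    // For example, "pop {r4, r8, pc}" contains the high register r8 and is
    // rewritten to a 32-bit t2LDMIA_UPD with SP as the base register.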
9392     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
9393       return false;
9394     assert(isThumbTwo());
9395     Inst.setOpcode(ARM::t2LDMIA_UPD);
9396     // Add the base register and writeback operands.
9397     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9398     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9399     return true;
9400   }
9401   case ARM::tPUSH: {
9402     bool listContainsBase;
9403     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
9404       return false;
9405     assert(isThumbTwo());
9406     Inst.setOpcode(ARM::t2STMDB_UPD);
9407     // Add the base register and writeback operands.
9408     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9409     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9410     return true;
9411   }
9412   case ARM::t2MOVi:
9413     // If we can use the 16-bit encoding and the user didn't explicitly
9414     // request the 32-bit variant, transform it here.
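    // For example, "movs r0, #42" outside of an IT block narrows to the
    // 16-bit tMOVi8 encoding.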
9415     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9416         (Inst.getOperand(1).isImm() &&
9417          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
9418         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9419         !HasWideQualifier) {
9420       // The operands aren't in the same order for tMOVi8...
9421       MCInst TmpInst;
9422       TmpInst.setOpcode(ARM::tMOVi8);
9423       TmpInst.addOperand(Inst.getOperand(0));
9424       TmpInst.addOperand(Inst.getOperand(4));
9425       TmpInst.addOperand(Inst.getOperand(1));
9426       TmpInst.addOperand(Inst.getOperand(2));
9427       TmpInst.addOperand(Inst.getOperand(3));
9428       Inst = TmpInst;
9429       return true;
9430     }
9431     break;
9432 
9433   case ARM::t2MOVr:
9434     // If we can use the 16-bit encoding and the user didn't explicitly
9435     // request the 32-bit variant, transform it here.
9436     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9437         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9438         Inst.getOperand(2).getImm() == ARMCC::AL &&
9439         Inst.getOperand(4).getReg() == ARM::CPSR &&
9440         !HasWideQualifier) {
9441       // The operands aren't the same for tMOV[S]r... (no cc_out)
9442       MCInst TmpInst;
9443       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
9444       TmpInst.addOperand(Inst.getOperand(0));
9445       TmpInst.addOperand(Inst.getOperand(1));
9446       TmpInst.addOperand(Inst.getOperand(2));
9447       TmpInst.addOperand(Inst.getOperand(3));
9448       Inst = TmpInst;
9449       return true;
9450     }
9451     break;
9452 
9453   case ARM::t2SXTH:
9454   case ARM::t2SXTB:
9455   case ARM::t2UXTH:
9456   case ARM::t2UXTB:
9457     // If we can use the 16-bit encoding and the user didn't explicitly
9458     // request the 32-bit variant, transform it here.
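    // For example, "sxth r0, r1" (with no rotation) narrows to the 16-bit
    // tSXTH encoding.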
9459     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9460         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9461         Inst.getOperand(2).getImm() == 0 &&
9462         !HasWideQualifier) {
9463       unsigned NewOpc;
9464       switch (Inst.getOpcode()) {
9465       default: llvm_unreachable("Illegal opcode!");
9466       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
9467       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
9468       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
9469       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
9470       }
9471       // The operands aren't the same for thumb1 (no rotate operand).
9472       MCInst TmpInst;
9473       TmpInst.setOpcode(NewOpc);
9474       TmpInst.addOperand(Inst.getOperand(0));
9475       TmpInst.addOperand(Inst.getOperand(1));
9476       TmpInst.addOperand(Inst.getOperand(3));
9477       TmpInst.addOperand(Inst.getOperand(4));
9478       Inst = TmpInst;
9479       return true;
9480     }
9481     break;
9482 
9483   case ARM::MOVsi: {
9484     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
    // rrx shifts and asr/lsr of #32 are encoded as 0
9486     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
9487       return false;
9488     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
9489       // Shifting by zero is accepted as a vanilla 'MOVr'
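      // For example, "mov r0, r1, lsl #0" is emitted as a plain "mov r0, r1".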
9490       MCInst TmpInst;
9491       TmpInst.setOpcode(ARM::MOVr);
9492       TmpInst.addOperand(Inst.getOperand(0));
9493       TmpInst.addOperand(Inst.getOperand(1));
9494       TmpInst.addOperand(Inst.getOperand(3));
9495       TmpInst.addOperand(Inst.getOperand(4));
9496       TmpInst.addOperand(Inst.getOperand(5));
9497       Inst = TmpInst;
9498       return true;
9499     }
9500     return false;
9501   }
9502   case ARM::ANDrsi:
9503   case ARM::ORRrsi:
9504   case ARM::EORrsi:
9505   case ARM::BICrsi:
9506   case ARM::SUBrsi:
9507   case ARM::ADDrsi: {
9508     unsigned newOpc;
9509     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
9510     if (SOpc == ARM_AM::rrx) return false;
9511     switch (Inst.getOpcode()) {
9512     default: llvm_unreachable("unexpected opcode!");
9513     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
9514     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
9515     case ARM::EORrsi: newOpc = ARM::EORrr; break;
9516     case ARM::BICrsi: newOpc = ARM::BICrr; break;
9517     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
9518     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
9519     }
9520     // If the shift is by zero, use the non-shifted instruction definition.
    // The exception is right shifts, where an encoded shift amount of 0 means
    // a shift by 32.
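    // For example, "and r0, r1, r2, lsl #0" becomes "and r0, r1, r2", whereas
    // "and r0, r1, r2, lsr #32" keeps the shifted form (its shift amount is
    // encoded as 0).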
9522     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
9523         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
9524       MCInst TmpInst;
9525       TmpInst.setOpcode(newOpc);
9526       TmpInst.addOperand(Inst.getOperand(0));
9527       TmpInst.addOperand(Inst.getOperand(1));
9528       TmpInst.addOperand(Inst.getOperand(2));
9529       TmpInst.addOperand(Inst.getOperand(4));
9530       TmpInst.addOperand(Inst.getOperand(5));
9531       TmpInst.addOperand(Inst.getOperand(6));
9532       Inst = TmpInst;
9533       return true;
9534     }
9535     return false;
9536   }
9537   case ARM::ITasm:
9538   case ARM::t2IT: {
9539     // Set up the IT block state according to the IT instruction we just
9540     // matched.
9541     assert(!inITBlock() && "nested IT blocks?!");
9542     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
9543                          Inst.getOperand(1).getImm());
9544     break;
9545   }
9546   case ARM::t2LSLrr:
9547   case ARM::t2LSRrr:
9548   case ARM::t2ASRrr:
9549   case ARM::t2SBCrr:
9550   case ARM::t2RORrr:
9551   case ARM::t2BICrr:
9552     // Assemblers should use the narrow encodings of these instructions when permissible.
9553     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
9554          isARMLowRegister(Inst.getOperand(2).getReg())) &&
9555         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
9556         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9557         !HasWideQualifier) {
9558       unsigned NewOpc;
9559       switch (Inst.getOpcode()) {
9560         default: llvm_unreachable("unexpected opcode");
9561         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
9562         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
9563         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
9564         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
9565         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
9566         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
9567       }
9568       MCInst TmpInst;
9569       TmpInst.setOpcode(NewOpc);
9570       TmpInst.addOperand(Inst.getOperand(0));
9571       TmpInst.addOperand(Inst.getOperand(5));
9572       TmpInst.addOperand(Inst.getOperand(1));
9573       TmpInst.addOperand(Inst.getOperand(2));
9574       TmpInst.addOperand(Inst.getOperand(3));
9575       TmpInst.addOperand(Inst.getOperand(4));
9576       Inst = TmpInst;
9577       return true;
9578     }
9579     return false;
9580 
9581   case ARM::t2ANDrr:
9582   case ARM::t2EORrr:
9583   case ARM::t2ADCrr:
9584   case ARM::t2ORRrr:
9585     // Assemblers should use the narrow encodings of these instructions when permissible.
9586     // These instructions are special in that they are commutable, so shorter encodings
9587     // are available more often.
9588     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
9589          isARMLowRegister(Inst.getOperand(2).getReg())) &&
9590         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
9591          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
9592         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9593         !HasWideQualifier) {
9594       unsigned NewOpc;
9595       switch (Inst.getOpcode()) {
9596         default: llvm_unreachable("unexpected opcode");
9597         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
9598         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
9599         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
9600         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
9601       }
9602       MCInst TmpInst;
9603       TmpInst.setOpcode(NewOpc);
9604       TmpInst.addOperand(Inst.getOperand(0));
9605       TmpInst.addOperand(Inst.getOperand(5));
9606       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
9607         TmpInst.addOperand(Inst.getOperand(1));
9608         TmpInst.addOperand(Inst.getOperand(2));
9609       } else {
9610         TmpInst.addOperand(Inst.getOperand(2));
9611         TmpInst.addOperand(Inst.getOperand(1));
9612       }
9613       TmpInst.addOperand(Inst.getOperand(3));
9614       TmpInst.addOperand(Inst.getOperand(4));
9615       Inst = TmpInst;
9616       return true;
9617     }
9618     return false;
9619   case ARM::MVE_VPST:
9620   case ARM::MVE_VPTv16i8:
9621   case ARM::MVE_VPTv8i16:
9622   case ARM::MVE_VPTv4i32:
9623   case ARM::MVE_VPTv16u8:
9624   case ARM::MVE_VPTv8u16:
9625   case ARM::MVE_VPTv4u32:
9626   case ARM::MVE_VPTv16s8:
9627   case ARM::MVE_VPTv8s16:
9628   case ARM::MVE_VPTv4s32:
9629   case ARM::MVE_VPTv4f32:
9630   case ARM::MVE_VPTv8f16:
9631   case ARM::MVE_VPTv16i8r:
9632   case ARM::MVE_VPTv8i16r:
9633   case ARM::MVE_VPTv4i32r:
9634   case ARM::MVE_VPTv16u8r:
9635   case ARM::MVE_VPTv8u16r:
9636   case ARM::MVE_VPTv4u32r:
9637   case ARM::MVE_VPTv16s8r:
9638   case ARM::MVE_VPTv8s16r:
9639   case ARM::MVE_VPTv4s32r:
9640   case ARM::MVE_VPTv4f32r:
9641   case ARM::MVE_VPTv8f16r: {
9642     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
9643     MCOperand &MO = Inst.getOperand(0);
9644     VPTState.Mask = MO.getImm();
9645     VPTState.CurPosition = 0;
9646     break;
9647   }
9648   }
9649   return false;
9650 }
9651 
9652 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
9654   // suffix depending on whether they're in an IT block or not.
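  // In Thumb2, for example, only "adds r0, r0, r1" is accepted outside of an
  // IT block, while inside one only the non-flag-setting "add r0, r0, r1" is.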
9655   unsigned Opc = Inst.getOpcode();
9656   const MCInstrDesc &MCID = MII.get(Opc);
9657   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
9658     assert(MCID.hasOptionalDef() &&
9659            "optionally flag setting instruction missing optional def operand");
9660     assert(MCID.NumOperands == Inst.getNumOperands() &&
9661            "operand count mismatch!");
9662     // Find the optional-def operand (cc_out).
9663     unsigned OpNo;
    for (OpNo = 0;
         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
         ++OpNo)
9667       ;
9668     // If we're parsing Thumb1, reject it completely.
9669     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
9670       return Match_RequiresFlagSetting;
9671     // If we're parsing Thumb2, which form is legal depends on whether we're
9672     // in an IT block.
9673     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
9674         !inITBlock())
9675       return Match_RequiresITBlock;
9676     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
9677         inITBlock())
9678       return Match_RequiresNotITBlock;
9679     // LSL with zero immediate is not allowed in an IT block
9680     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
9681       return Match_RequiresNotITBlock;
9682   } else if (isThumbOne()) {
    // Some Thumb1 encodings that support high registers only allow both
    // registers to be from r0-r7 when in Thumb2.
9685     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
9686         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9687         isARMLowRegister(Inst.getOperand(2).getReg()))
9688       return Match_RequiresThumb2;
9689     // Others only require ARMv6 or later.
9690     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
9691              isARMLowRegister(Inst.getOperand(0).getReg()) &&
9692              isARMLowRegister(Inst.getOperand(1).getReg()))
9693       return Match_RequiresV6;
9694   }
9695 
9696   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
9697   // than the loop below can handle, so it uses the GPRnopc register class and
9698   // we do SP handling here.
9699   if (Opc == ARM::t2MOVr && !hasV8Ops())
9700   {
9701     // SP as both source and destination is not allowed
9702     if (Inst.getOperand(0).getReg() == ARM::SP &&
9703         Inst.getOperand(1).getReg() == ARM::SP)
9704       return Match_RequiresV8;
    // When setting flags, SP is not allowed as either source or destination.
9706     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
9707         (Inst.getOperand(0).getReg() == ARM::SP ||
9708          Inst.getOperand(1).getReg() == ARM::SP))
9709       return Match_RequiresV8;
9710   }
9711 
9712   switch (Inst.getOpcode()) {
9713   case ARM::VMRS:
9714   case ARM::VMSR:
9715   case ARM::VMRS_FPCXTS:
9716   case ARM::VMRS_FPCXTNS:
9717   case ARM::VMSR_FPCXTS:
9718   case ARM::VMSR_FPCXTNS:
9719   case ARM::VMRS_FPSCR_NZCVQC:
9720   case ARM::VMSR_FPSCR_NZCVQC:
9721   case ARM::FMSTAT:
9722   case ARM::VMRS_VPR:
9723   case ARM::VMRS_P0:
9724   case ARM::VMSR_VPR:
9725   case ARM::VMSR_P0:
9726     // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
9727     // ARMv8-A.
9728     if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
9729         (isThumb() && !hasV8Ops()))
9730       return Match_InvalidOperand;
9731     break;
9732   default:
9733     break;
9734   }
9735 
9736   for (unsigned I = 0; I < MCID.NumOperands; ++I)
9737     if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
      // rGPRRegClass excludes PC, and before ARMv8 it also excludes SP.
9739       if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
9740         return Match_RequiresV8;
9741       else if (Inst.getOperand(I).getReg() == ARM::PC)
9742         return Match_InvalidOperand;
9743     }
9744 
9745   return Match_Success;
9746 }
9747 
9748 namespace llvm {
9749 
9750 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
9751   return true; // In an assembly source, no need to second-guess
9752 }
9753 
9754 } // end namespace llvm
9755 
// Returns true if Inst would be unpredictable if it appeared in an IT block
// but was not the last instruction in the block.
9758 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
9759   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9760 
9761   // All branch & call instructions terminate IT blocks with the exception of
9762   // SVC.
9763   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
9764       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
9765     return true;
9766 
9767   // Any arithmetic instruction which writes to the PC also terminates the IT
9768   // block.
9769   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
9770     return true;
9771 
9772   return false;
9773 }
9774 
9775 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
9776                                           SmallVectorImpl<NearMissInfo> &NearMisses,
9777                                           bool MatchingInlineAsm,
9778                                           bool &EmitInITBlock,
9779                                           MCStreamer &Out) {
9780   // If we can't use an implicit IT block here, just match as normal.
9781   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
9782     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
9783 
9784   // Try to match the instruction in an extension of the current IT block (if
9785   // there is one).
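  // For example, in implicit-IT mode a "moveq"/"movne" pair can be folded
  // into a single implicit "ite eq" block.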
9786   if (inImplicitITBlock()) {
9787     extendImplicitITBlock(ITState.Cond);
9788     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
9789             Match_Success) {
      // The match succeeded, but we still have to check that the instruction
      // is valid in this implicit IT block.
9792       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9793       if (MCID.isPredicable()) {
9794         ARMCC::CondCodes InstCond =
9795             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9796                 .getImm();
9797         ARMCC::CondCodes ITCond = currentITCond();
9798         if (InstCond == ITCond) {
9799           EmitInITBlock = true;
9800           return Match_Success;
9801         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
9802           invertCurrentITCondition();
9803           EmitInITBlock = true;
9804           return Match_Success;
9805         }
9806       }
9807     }
9808     rewindImplicitITPosition();
9809   }
9810 
9811   // Finish the current IT block, and try to match outside any IT block.
9812   flushPendingInstructions(Out);
9813   unsigned PlainMatchResult =
9814       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
9815   if (PlainMatchResult == Match_Success) {
9816     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9817     if (MCID.isPredicable()) {
9818       ARMCC::CondCodes InstCond =
9819           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9820               .getImm();
9821       // Some forms of the branch instruction have their own condition code
9822       // fields, so can be conditionally executed without an IT block.
9823       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
9824         EmitInITBlock = false;
9825         return Match_Success;
9826       }
9827       if (InstCond == ARMCC::AL) {
9828         EmitInITBlock = false;
9829         return Match_Success;
9830       }
9831     } else {
9832       EmitInITBlock = false;
9833       return Match_Success;
9834     }
9835   }
9836 
9837   // Try to match in a new IT block. The matcher doesn't check the actual
9838   // condition, so we create an IT block with a dummy condition, and fix it up
9839   // once we know the actual condition.
9840   startImplicitITBlock();
9841   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
9842       Match_Success) {
9843     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9844     if (MCID.isPredicable()) {
9845       ITState.Cond =
9846           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9847               .getImm();
9848       EmitInITBlock = true;
9849       return Match_Success;
9850     }
9851   }
9852   discardImplicitITBlock();
9853 
9854   // If none of these succeed, return the error we got when trying to match
9855   // outside any IT blocks.
9856   EmitInITBlock = false;
9857   return PlainMatchResult;
9858 }
9859 
9860 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
9861                                          unsigned VariantID = 0);
9862 
9863 static const char *getSubtargetFeatureName(uint64_t Val);
9864 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
9865                                            OperandVector &Operands,
9866                                            MCStreamer &Out, uint64_t &ErrorInfo,
9867                                            bool MatchingInlineAsm) {
9868   MCInst Inst;
9869   unsigned MatchResult;
9870   bool PendConditionalInstruction = false;
9871 
9872   SmallVector<NearMissInfo, 4> NearMisses;
9873   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
9874                                  PendConditionalInstruction, Out);
9875 
9876   switch (MatchResult) {
9877   case Match_Success:
9878     LLVM_DEBUG(dbgs() << "Parsed as: ";
9879                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
9880                dbgs() << "\n");
9881 
    // Context-sensitive operand constraints aren't handled by the matcher,
9883     // so check them here.
9884     if (validateInstruction(Inst, Operands)) {
      // Still advance the IT block state; otherwise one wrong condition causes
      // nasty cascading errors.
9887       forwardITPosition();
9888       forwardVPTPosition();
9889       return true;
9890     }
9891 
    { // processInstruction() updates inITBlock state, so save it away first.
9893       bool wasInITBlock = inITBlock();
9894 
9895       // Some instructions need post-processing to, for example, tweak which
9896       // encoding is selected. Loop on it while changes happen so the
9897       // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
9899       while (processInstruction(Inst, Operands, Out))
9900         LLVM_DEBUG(dbgs() << "Changed to: ";
9901                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
9902                    dbgs() << "\n");
9903 
      // Only after the instruction is fully processed can we validate it.
9905       if (wasInITBlock && hasV8Ops() && isThumb() &&
9906           !isV8EligibleForIT(&Inst)) {
9907         Warning(IDLoc, "deprecated instruction in IT block");
9908       }
9909     }
9910 
9911     // Only move forward at the very end so that everything in validate
9912     // and process gets a consistent answer about whether we're in an IT
9913     // block.
9914     forwardITPosition();
9915     forwardVPTPosition();
9916 
    // ITasm is an ARM-mode pseudo-instruction that just sets the IT block
    // state and doesn't actually encode to anything.
9919     if (Inst.getOpcode() == ARM::ITasm)
9920       return false;
9921 
9922     Inst.setLoc(IDLoc);
9923     if (PendConditionalInstruction) {
9924       PendingConditionalInsts.push_back(Inst);
9925       if (isITBlockFull() || isITBlockTerminator(Inst))
9926         flushPendingInstructions(Out);
9927     } else {
9928       Out.EmitInstruction(Inst, getSTI());
9929     }
9930     return false;
9931   case Match_NearMisses:
9932     ReportNearMisses(NearMisses, IDLoc, Operands);
9933     return true;
9934   case Match_MnemonicFail: {
9935     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
9936     std::string Suggestion = ARMMnemonicSpellCheck(
9937       ((ARMOperand &)*Operands[0]).getToken(), FBS);
9938     return Error(IDLoc, "invalid instruction" + Suggestion,
9939                  ((ARMOperand &)*Operands[0]).getLocRange());
9940   }
9941   }
9942 
9943   llvm_unreachable("Implement any new match types added!");
9944 }
9945 
/// parseDirective parses the ARM-specific directives.
9947 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
9948   const MCObjectFileInfo::Environment Format =
9949     getContext().getObjectFileInfo()->getObjectFileType();
9950   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9951   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
9952 
9953   StringRef IDVal = DirectiveID.getIdentifier();
9954   if (IDVal == ".word")
9955     parseLiteralValues(4, DirectiveID.getLoc());
9956   else if (IDVal == ".short" || IDVal == ".hword")
9957     parseLiteralValues(2, DirectiveID.getLoc());
9958   else if (IDVal == ".thumb")
9959     parseDirectiveThumb(DirectiveID.getLoc());
9960   else if (IDVal == ".arm")
9961     parseDirectiveARM(DirectiveID.getLoc());
9962   else if (IDVal == ".thumb_func")
9963     parseDirectiveThumbFunc(DirectiveID.getLoc());
9964   else if (IDVal == ".code")
9965     parseDirectiveCode(DirectiveID.getLoc());
9966   else if (IDVal == ".syntax")
9967     parseDirectiveSyntax(DirectiveID.getLoc());
9968   else if (IDVal == ".unreq")
9969     parseDirectiveUnreq(DirectiveID.getLoc());
9970   else if (IDVal == ".fnend")
9971     parseDirectiveFnEnd(DirectiveID.getLoc());
9972   else if (IDVal == ".cantunwind")
9973     parseDirectiveCantUnwind(DirectiveID.getLoc());
9974   else if (IDVal == ".personality")
9975     parseDirectivePersonality(DirectiveID.getLoc());
9976   else if (IDVal == ".handlerdata")
9977     parseDirectiveHandlerData(DirectiveID.getLoc());
9978   else if (IDVal == ".setfp")
9979     parseDirectiveSetFP(DirectiveID.getLoc());
9980   else if (IDVal == ".pad")
9981     parseDirectivePad(DirectiveID.getLoc());
9982   else if (IDVal == ".save")
9983     parseDirectiveRegSave(DirectiveID.getLoc(), false);
9984   else if (IDVal == ".vsave")
9985     parseDirectiveRegSave(DirectiveID.getLoc(), true);
9986   else if (IDVal == ".ltorg" || IDVal == ".pool")
9987     parseDirectiveLtorg(DirectiveID.getLoc());
9988   else if (IDVal == ".even")
9989     parseDirectiveEven(DirectiveID.getLoc());
9990   else if (IDVal == ".personalityindex")
9991     parseDirectivePersonalityIndex(DirectiveID.getLoc());
9992   else if (IDVal == ".unwind_raw")
9993     parseDirectiveUnwindRaw(DirectiveID.getLoc());
9994   else if (IDVal == ".movsp")
9995     parseDirectiveMovSP(DirectiveID.getLoc());
9996   else if (IDVal == ".arch_extension")
9997     parseDirectiveArchExtension(DirectiveID.getLoc());
9998   else if (IDVal == ".align")
9999     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
10000   else if (IDVal == ".thumb_set")
10001     parseDirectiveThumbSet(DirectiveID.getLoc());
10002   else if (IDVal == ".inst")
10003     parseDirectiveInst(DirectiveID.getLoc());
10004   else if (IDVal == ".inst.n")
10005     parseDirectiveInst(DirectiveID.getLoc(), 'n');
10006   else if (IDVal == ".inst.w")
10007     parseDirectiveInst(DirectiveID.getLoc(), 'w');
10008   else if (!IsMachO && !IsCOFF) {
10009     if (IDVal == ".arch")
10010       parseDirectiveArch(DirectiveID.getLoc());
10011     else if (IDVal == ".cpu")
10012       parseDirectiveCPU(DirectiveID.getLoc());
10013     else if (IDVal == ".eabi_attribute")
10014       parseDirectiveEabiAttr(DirectiveID.getLoc());
10015     else if (IDVal == ".fpu")
10016       parseDirectiveFPU(DirectiveID.getLoc());
10017     else if (IDVal == ".fnstart")
10018       parseDirectiveFnStart(DirectiveID.getLoc());
10019     else if (IDVal == ".object_arch")
10020       parseDirectiveObjectArch(DirectiveID.getLoc());
10021     else if (IDVal == ".tlsdescseq")
10022       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
10023     else
10024       return true;
10025   } else
10026     return true;
10027   return false;
10028 }
10029 
10030 /// parseLiteralValues
10031 ///  ::= .hword expression [, expression]*
10032 ///  ::= .short expression [, expression]*
10033 ///  ::= .word expression [, expression]*
10034 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
10035   auto parseOne = [&]() -> bool {
10036     const MCExpr *Value;
10037     if (getParser().parseExpression(Value))
10038       return true;
10039     getParser().getStreamer().EmitValue(Value, Size, L);
10040     return false;
10041   };
10042   return (parseMany(parseOne));
10043 }
10044 
10045 /// parseDirectiveThumb
10046 ///  ::= .thumb
10047 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
10048   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
10049       check(!hasThumb(), L, "target does not support Thumb mode"))
10050     return true;
10051 
10052   if (!isThumb())
10053     SwitchMode();
10054 
10055   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
10056   return false;
10057 }
10058 
10059 /// parseDirectiveARM
10060 ///  ::= .arm
10061 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
10062   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
10063       check(!hasARM(), L, "target does not support ARM mode"))
10064     return true;
10065 
10066   if (isThumb())
10067     SwitchMode();
10068   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
10069   return false;
10070 }
10071 
10072 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) {
10073   // We need to flush the current implicit IT block on a label, because it is
10074   // not legal to branch into an IT block.
10075   flushPendingInstructions(getStreamer());
10076 }
10077 
10078 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
10079   if (NextSymbolIsThumb) {
10080     getParser().getStreamer().EmitThumbFunc(Symbol);
10081     NextSymbolIsThumb = false;
10082   }
10083 }
10084 
10085 /// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
10087 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
10088   MCAsmParser &Parser = getParser();
10089   const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
10090   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
10091 
  // Darwin asm has an optional function name after the .thumb_func directive;
  // ELF doesn't.
10094 
10095   if (IsMachO) {
10096     if (Parser.getTok().is(AsmToken::Identifier) ||
10097         Parser.getTok().is(AsmToken::String)) {
10098       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
10099           Parser.getTok().getIdentifier());
10100       getParser().getStreamer().EmitThumbFunc(Func);
10101       Parser.Lex();
10102       if (parseToken(AsmToken::EndOfStatement,
10103                      "unexpected token in '.thumb_func' directive"))
10104         return true;
10105       return false;
10106     }
10107   }
10108 
10109   if (parseToken(AsmToken::EndOfStatement,
10110                  "unexpected token in '.thumb_func' directive"))
10111     return true;
10112 
10113   NextSymbolIsThumb = true;
10114   return false;
10115 }
10116 
10117 /// parseDirectiveSyntax
10118 ///  ::= .syntax unified | divided
10119 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
10120   MCAsmParser &Parser = getParser();
10121   const AsmToken &Tok = Parser.getTok();
10122   if (Tok.isNot(AsmToken::Identifier)) {
10123     Error(L, "unexpected token in .syntax directive");
10124     return false;
10125   }
10126 
10127   StringRef Mode = Tok.getString();
10128   Parser.Lex();
10129   if (check(Mode == "divided" || Mode == "DIVIDED", L,
10130             "'.syntax divided' arm assembly not supported") ||
10131       check(Mode != "unified" && Mode != "UNIFIED", L,
10132             "unrecognized syntax mode in .syntax directive") ||
10133       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10134     return true;
10135 
10136   // TODO tell the MC streamer the mode
10137   // getParser().getStreamer().Emit???();
10138   return false;
10139 }
10140 
10141 /// parseDirectiveCode
10142 ///  ::= .code 16 | 32
10143 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
10144   MCAsmParser &Parser = getParser();
10145   const AsmToken &Tok = Parser.getTok();
10146   if (Tok.isNot(AsmToken::Integer))
10147     return Error(L, "unexpected token in .code directive");
10148   int64_t Val = Parser.getTok().getIntVal();
10149   if (Val != 16 && Val != 32) {
10150     Error(L, "invalid operand to .code directive");
10151     return false;
10152   }
10153   Parser.Lex();
10154 
10155   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10156     return true;
10157 
10158   if (Val == 16) {
10159     if (!hasThumb())
10160       return Error(L, "target does not support Thumb mode");
10161 
10162     if (!isThumb())
10163       SwitchMode();
10164     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
10165   } else {
10166     if (!hasARM())
10167       return Error(L, "target does not support ARM mode");
10168 
10169     if (isThumb())
10170       SwitchMode();
10171     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
10172   }
10173 
10174   return false;
10175 }
10176 
10177 /// parseDirectiveReq
10178 ///  ::= name .req registername
10179 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
10180   MCAsmParser &Parser = getParser();
10181   Parser.Lex(); // Eat the '.req' token.
10182   unsigned Reg;
10183   SMLoc SRegLoc, ERegLoc;
10184   if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
10185             "register name expected") ||
10186       parseToken(AsmToken::EndOfStatement,
10187                  "unexpected input in .req directive."))
10188     return true;
10189 
10190   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
10191     return Error(SRegLoc,
10192                  "redefinition of '" + Name + "' does not match original.");
10193 
10194   return false;
10195 }
10196 
/// parseDirectiveUnreq
10198 ///  ::= .unreq registername
10199 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
10200   MCAsmParser &Parser = getParser();
10201   if (Parser.getTok().isNot(AsmToken::Identifier))
10202     return Error(L, "unexpected input in .unreq directive.");
10203   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
10204   Parser.Lex(); // Eat the identifier.
10205   if (parseToken(AsmToken::EndOfStatement,
10206                  "unexpected input in '.unreq' directive"))
10207     return true;
10208   return false;
10209 }
10210 
10211 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
10212 // before, if supported by the new target, or emit mapping symbols for the mode
10213 // switch.
10214 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
10215   if (WasThumb != isThumb()) {
10216     if (WasThumb && hasThumb()) {
10217       // Stay in Thumb mode
10218       SwitchMode();
10219     } else if (!WasThumb && hasARM()) {
10220       // Stay in ARM mode
10221       SwitchMode();
10222     } else {
10223       // Mode switch forced, because the new arch doesn't support the old mode.
10224       getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
10225                                                             : MCAF_Code32);
      // Warn about the implicit mode switch. GAS does not switch modes here,
10227       // but instead stays in the old mode, reporting an error on any following
10228       // instructions as the mode does not exist on the target.
10229       Warning(Loc, Twine("new target does not support ") +
10230                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
10231                        (!WasThumb ? "thumb" : "arm") + " mode");
10232     }
10233   }
10234 }
10235 
10236 /// parseDirectiveArch
10237 ///  ::= .arch token
10238 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
10239   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
10240   ARM::ArchKind ID = ARM::parseArch(Arch);
10241 
10242   if (ID == ARM::ArchKind::INVALID)
10243     return Error(L, "Unknown arch name");
10244 
10245   bool WasThumb = isThumb();
10246   Triple T;
10247   MCSubtargetInfo &STI = copySTI();
10248   STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
10249   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
10250   FixModeAfterArchChange(WasThumb, L);
10251 
10252   getTargetStreamer().emitArch(ID);
10253   return false;
10254 }
10255 
10256 /// parseDirectiveEabiAttr
10257 ///  ::= .eabi_attribute int, int [, "str"]
10258 ///  ::= .eabi_attribute Tag_name, int [, "str"]
10259 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
10260   MCAsmParser &Parser = getParser();
10261   int64_t Tag;
10262   SMLoc TagLoc;
10263   TagLoc = Parser.getTok().getLoc();
10264   if (Parser.getTok().is(AsmToken::Identifier)) {
10265     StringRef Name = Parser.getTok().getIdentifier();
10266     Tag = ARMBuildAttrs::AttrTypeFromString(Name);
10267     if (Tag == -1) {
10268       Error(TagLoc, "attribute name not recognised: " + Name);
10269       return false;
10270     }
10271     Parser.Lex();
10272   } else {
10273     const MCExpr *AttrExpr;
10274 
10275     TagLoc = Parser.getTok().getLoc();
10276     if (Parser.parseExpression(AttrExpr))
10277       return true;
10278 
10279     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
10280     if (check(!CE, TagLoc, "expected numeric constant"))
10281       return true;
10282 
10283     Tag = CE->getValue();
10284   }
10285 
10286   if (Parser.parseToken(AsmToken::Comma, "comma expected"))
10287     return true;
10288 
10289   StringRef StringValue = "";
10290   bool IsStringValue = false;
10291 
10292   int64_t IntegerValue = 0;
10293   bool IsIntegerValue = false;
10294 
10295   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
10296     IsStringValue = true;
10297   else if (Tag == ARMBuildAttrs::compatibility) {
10298     IsStringValue = true;
10299     IsIntegerValue = true;
10300   } else if (Tag < 32 || Tag % 2 == 0)
10301     IsIntegerValue = true;
10302   else if (Tag % 2 == 1)
10303     IsStringValue = true;
10304   else
10305     llvm_unreachable("invalid tag type");
10306 
10307   if (IsIntegerValue) {
10308     const MCExpr *ValueExpr;
10309     SMLoc ValueExprLoc = Parser.getTok().getLoc();
10310     if (Parser.parseExpression(ValueExpr))
10311       return true;
10312 
10313     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
10314     if (!CE)
10315       return Error(ValueExprLoc, "expected numeric constant");
10316     IntegerValue = CE->getValue();
10317   }
10318 
10319   if (Tag == ARMBuildAttrs::compatibility) {
10320     if (Parser.parseToken(AsmToken::Comma, "comma expected"))
10321       return true;
10322   }
10323 
10324   if (IsStringValue) {
10325     if (Parser.getTok().isNot(AsmToken::String))
10326       return Error(Parser.getTok().getLoc(), "bad string constant");
10327 
10328     StringValue = Parser.getTok().getStringContents();
10329     Parser.Lex();
10330   }
10331 
10332   if (Parser.parseToken(AsmToken::EndOfStatement,
10333                         "unexpected token in '.eabi_attribute' directive"))
10334     return true;
10335 
10336   if (IsIntegerValue && IsStringValue) {
10337     assert(Tag == ARMBuildAttrs::compatibility);
10338     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
10339   } else if (IsIntegerValue)
10340     getTargetStreamer().emitAttribute(Tag, IntegerValue);
10341   else if (IsStringValue)
10342     getTargetStreamer().emitTextAttribute(Tag, StringValue);
10343   return false;
10344 }
10345 
10346 /// parseDirectiveCPU
10347 ///  ::= .cpu str
10348 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
10349   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
10350   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
10351 
10352   // FIXME: This is using table-gen data, but should be moved to
10353   // ARMTargetParser once that is table-gen'd.
10354   if (!getSTI().isCPUStringValid(CPU))
10355     return Error(L, "Unknown CPU name");
10356 
10357   bool WasThumb = isThumb();
10358   MCSubtargetInfo &STI = copySTI();
10359   STI.setDefaultFeatures(CPU, "");
10360   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
10361   FixModeAfterArchChange(WasThumb, L);
10362 
10363   return false;
10364 }
10365 
10366 /// parseDirectiveFPU
10367 ///  ::= .fpu str
10368 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
10369   SMLoc FPUNameLoc = getTok().getLoc();
10370   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
10371 
10372   unsigned ID = ARM::parseFPU(FPU);
10373   std::vector<StringRef> Features;
10374   if (!ARM::getFPUFeatures(ID, Features))
10375     return Error(FPUNameLoc, "Unknown FPU name");
10376 
10377   MCSubtargetInfo &STI = copySTI();
10378   for (auto Feature : Features)
10379     STI.ApplyFeatureFlag(Feature);
10380   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
10381 
10382   getTargetStreamer().emitFPU(ID);
10383   return false;
10384 }
10385 
10386 /// parseDirectiveFnStart
10387 ///  ::= .fnstart
10388 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
10389   if (parseToken(AsmToken::EndOfStatement,
10390                  "unexpected token in '.fnstart' directive"))
10391     return true;
10392 
10393   if (UC.hasFnStart()) {
10394     Error(L, ".fnstart starts before the end of previous one");
10395     UC.emitFnStartLocNotes();
10396     return true;
10397   }
10398 
10399   // Reset the unwind directives parser state
10400   UC.reset();
10401 
10402   getTargetStreamer().emitFnStart();
10403 
10404   UC.recordFnStart(L);
10405   return false;
10406 }
10407 
10408 /// parseDirectiveFnEnd
10409 ///  ::= .fnend
10410 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
10411   if (parseToken(AsmToken::EndOfStatement,
10412                  "unexpected token in '.fnend' directive"))
10413     return true;
10414   // Check the ordering of unwind directives
10415   if (!UC.hasFnStart())
10416     return Error(L, ".fnstart must precede .fnend directive");
10417 
  getTargetStreamer().emitFnEnd();

  // Reset the unwind directives parser state
  UC.reset();
10422   return false;
10423 }
10424 
10425 /// parseDirectiveCantUnwind
10426 ///  ::= .cantunwind
10427 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
10428   if (parseToken(AsmToken::EndOfStatement,
10429                  "unexpected token in '.cantunwind' directive"))
10430     return true;
10431 
10432   UC.recordCantUnwind(L);
10433   // Check the ordering of unwind directives
10434   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
10435     return true;
10436 
10437   if (UC.hasHandlerData()) {
10438     Error(L, ".cantunwind can't be used with .handlerdata directive");
10439     UC.emitHandlerDataLocNotes();
10440     return true;
10441   }
10442   if (UC.hasPersonality()) {
10443     Error(L, ".cantunwind can't be used with .personality directive");
10444     UC.emitPersonalityLocNotes();
10445     return true;
10446   }
10447 
10448   getTargetStreamer().emitCantUnwind();
10449   return false;
10450 }
10451 
10452 /// parseDirectivePersonality
10453 ///  ::= .personality name
10454 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
10455   MCAsmParser &Parser = getParser();
10456   bool HasExistingPersonality = UC.hasPersonality();
10457 
10458   // Parse the name of the personality routine
10459   if (Parser.getTok().isNot(AsmToken::Identifier))
10460     return Error(L, "unexpected input in .personality directive.");
10461   StringRef Name(Parser.getTok().getIdentifier());
10462   Parser.Lex();
10463 
10464   if (parseToken(AsmToken::EndOfStatement,
10465                  "unexpected token in '.personality' directive"))
10466     return true;
10467 
10468   UC.recordPersonality(L);
10469 
10470   // Check the ordering of unwind directives
10471   if (!UC.hasFnStart())
10472     return Error(L, ".fnstart must precede .personality directive");
10473   if (UC.cantUnwind()) {
10474     Error(L, ".personality can't be used with .cantunwind directive");
10475     UC.emitCantUnwindLocNotes();
10476     return true;
10477   }
10478   if (UC.hasHandlerData()) {
10479     Error(L, ".personality must precede .handlerdata directive");
10480     UC.emitHandlerDataLocNotes();
10481     return true;
10482   }
10483   if (HasExistingPersonality) {
10484     Error(L, "multiple personality directives");
10485     UC.emitPersonalityLocNotes();
10486     return true;
10487   }
10488 
10489   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
10490   getTargetStreamer().emitPersonality(PR);
10491   return false;
10492 }
10493 
10494 /// parseDirectiveHandlerData
10495 ///  ::= .handlerdata
10496 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
10497   if (parseToken(AsmToken::EndOfStatement,
10498                  "unexpected token in '.handlerdata' directive"))
10499     return true;
10500 
10501   UC.recordHandlerData(L);
10502   // Check the ordering of unwind directives
10503   if (!UC.hasFnStart())
10504     return Error(L, ".fnstart must precede .personality directive");
10505   if (UC.cantUnwind()) {
10506     Error(L, ".handlerdata can't be used with .cantunwind directive");
10507     UC.emitCantUnwindLocNotes();
10508     return true;
10509   }
10510 
10511   getTargetStreamer().emitHandlerData();
10512   return false;
10513 }
10514 
10515 /// parseDirectiveSetFP
10516 ///  ::= .setfp fpreg, spreg [, offset]
10517 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
10518   MCAsmParser &Parser = getParser();
10519   // Check the ordering of unwind directives
10520   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
10521       check(UC.hasHandlerData(), L,
10522             ".setfp must precede .handlerdata directive"))
10523     return true;
10524 
10525   // Parse fpreg
10526   SMLoc FPRegLoc = Parser.getTok().getLoc();
10527   int FPReg = tryParseRegister();
10528 
10529   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
10530       Parser.parseToken(AsmToken::Comma, "comma expected"))
10531     return true;
10532 
10533   // Parse spreg
10534   SMLoc SPRegLoc = Parser.getTok().getLoc();
10535   int SPReg = tryParseRegister();
10536   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
10537       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
10538             "register should be either $sp or the latest fp register"))
10539     return true;
10540 
10541   // Update the frame pointer register
10542   UC.saveFPReg(FPReg);
10543 
10544   // Parse offset
10545   int64_t Offset = 0;
10546   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10547     if (Parser.getTok().isNot(AsmToken::Hash) &&
10548         Parser.getTok().isNot(AsmToken::Dollar))
10549       return Error(Parser.getTok().getLoc(), "'#' expected");
10550     Parser.Lex(); // skip hash token.
10551 
10552     const MCExpr *OffsetExpr;
10553     SMLoc ExLoc = Parser.getTok().getLoc();
10554     SMLoc EndLoc;
10555     if (getParser().parseExpression(OffsetExpr, EndLoc))
10556       return Error(ExLoc, "malformed setfp offset");
10557     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10558     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
10559       return true;
10560     Offset = CE->getValue();
10561   }
10562 
10563   if (Parser.parseToken(AsmToken::EndOfStatement))
10564     return true;
10565 
10566   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
10567                                 static_cast<unsigned>(SPReg), Offset);
10568   return false;
10569 }
10570 
10571 /// parseDirective
10572 ///  ::= .pad offset
10573 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
10574   MCAsmParser &Parser = getParser();
10575   // Check the ordering of unwind directives
10576   if (!UC.hasFnStart())
10577     return Error(L, ".fnstart must precede .pad directive");
10578   if (UC.hasHandlerData())
10579     return Error(L, ".pad must precede .handlerdata directive");
10580 
10581   // Parse the offset
10582   if (Parser.getTok().isNot(AsmToken::Hash) &&
10583       Parser.getTok().isNot(AsmToken::Dollar))
10584     return Error(Parser.getTok().getLoc(), "'#' expected");
10585   Parser.Lex(); // skip hash token.
10586 
10587   const MCExpr *OffsetExpr;
10588   SMLoc ExLoc = Parser.getTok().getLoc();
10589   SMLoc EndLoc;
10590   if (getParser().parseExpression(OffsetExpr, EndLoc))
10591     return Error(ExLoc, "malformed pad offset");
10592   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10593   if (!CE)
10594     return Error(ExLoc, "pad offset must be an immediate");
10595 
10596   if (parseToken(AsmToken::EndOfStatement,
10597                  "unexpected token in '.pad' directive"))
10598     return true;
10599 
10600   getTargetStreamer().emitPad(CE->getValue());
10601   return false;
10602 }
10603 
10604 /// parseDirectiveRegSave
10605 ///  ::= .save  { registers }
10606 ///  ::= .vsave { registers }
10607 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
10608   // Check the ordering of unwind directives
10609   if (!UC.hasFnStart())
10610     return Error(L, ".fnstart must precede .save or .vsave directives");
10611   if (UC.hasHandlerData())
10612     return Error(L, ".save or .vsave must precede .handlerdata directive");
10613 
10614   // RAII object to make sure parsed operands are deleted.
10615   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
10616 
10617   // Parse the register list
10618   if (parseRegisterList(Operands) ||
10619       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10620     return true;
10621   ARMOperand &Op = (ARMOperand &)*Operands[0];
10622   if (!IsVector && !Op.isRegList())
10623     return Error(L, ".save expects GPR registers");
10624   if (IsVector && !Op.isDPRRegList())
10625     return Error(L, ".vsave expects DPR registers");
10626 
10627   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
10628   return false;
10629 }
10630 
10631 /// parseDirectiveInst
10632 ///  ::= .inst opcode [, ...]
10633 ///  ::= .inst.n opcode [, ...]
10634 ///  ::= .inst.w opcode [, ...]
10635 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
10636   int Width = 4;
10637 
10638   if (isThumb()) {
10639     switch (Suffix) {
10640     case 'n':
10641       Width = 2;
10642       break;
10643     case 'w':
10644       break;
10645     default:
10646       Width = 0;
10647       break;
10648     }
10649   } else {
10650     if (Suffix)
10651       return Error(Loc, "width suffixes are invalid in ARM mode");
10652   }
10653 
10654   auto parseOne = [&]() -> bool {
10655     const MCExpr *Expr;
10656     if (getParser().parseExpression(Expr))
10657       return true;
10658     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
10659     if (!Value) {
10660       return Error(Loc, "expected constant expression");
10661     }
10662 
10663     char CurSuffix = Suffix;
10664     switch (Width) {
10665     case 2:
10666       if (Value->getValue() > 0xffff)
10667         return Error(Loc, "inst.n operand is too big, use inst.w instead");
10668       break;
10669     case 4:
10670       if (Value->getValue() > 0xffffffff)
10671         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
10672                               " operand is too big");
10673       break;
10674     case 0:
10675       // Thumb mode, no width indicated. Guess from the opcode, if possible.
10676       if (Value->getValue() < 0xe800)
10677         CurSuffix = 'n';
10678       else if (Value->getValue() >= 0xe8000000)
10679         CurSuffix = 'w';
10680       else
10681         return Error(Loc, "cannot determine Thumb instruction size, "
10682                           "use inst.n/inst.w instead");
10683       break;
10684     default:
10685       llvm_unreachable("only supported widths are 2 and 4");
10686     }
10687 
10688     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
10689     return false;
10690   };
10691 
10692   if (parseOptionalToken(AsmToken::EndOfStatement))
10693     return Error(Loc, "expected expression following directive");
10694   if (parseMany(parseOne))
10695     return true;
10696   return false;
10697 }
10698 
10699 /// parseDirectiveLtorg
10700 ///  ::= .ltorg | .pool
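/// For example (illustrative): flush the literal pool so that a preceding
/// "ldr r0, =0x12345678" can reach its constant:
///   .ltorg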
10701 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
10702   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10703     return true;
10704   getTargetStreamer().emitCurrentConstantPool();
10705   return false;
10706 }
10707 
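/// parseDirectiveEven
///  ::= .even
/// Aligns to a 2-byte boundary, e.g. (illustrative) before Thumb code.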
10708 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
10709   const MCSection *Section = getStreamer().getCurrentSectionOnly();
10710 
10711   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10712     return true;
10713 
10714   if (!Section) {
10715     getStreamer().InitSections(false);
10716     Section = getStreamer().getCurrentSectionOnly();
10717   }
10718 
10719   assert(Section && "must have section to emit alignment");
10720   if (Section->UseCodeAlign())
10721     getStreamer().EmitCodeAlignment(2);
10722   else
10723     getStreamer().EmitValueToAlignment(2);
10724 
10725   return false;
10726 }
10727 
10728 /// parseDirectivePersonalityIndex
10729 ///   ::= .personalityindex index
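/// For example (illustrative; index 0 is understood to select the
/// __aeabi_unwind_cpp_pr0 personality routine):
///   .personalityindex 0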
10730 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
10731   MCAsmParser &Parser = getParser();
10732   bool HasExistingPersonality = UC.hasPersonality();
10733 
10734   const MCExpr *IndexExpression;
10735   SMLoc IndexLoc = Parser.getTok().getLoc();
10736   if (Parser.parseExpression(IndexExpression) ||
10737       parseToken(AsmToken::EndOfStatement,
10738                  "unexpected token in '.personalityindex' directive")) {
10739     return true;
10740   }
10741 
10742   UC.recordPersonalityIndex(L);
10743 
10744   if (!UC.hasFnStart()) {
10745     return Error(L, ".fnstart must precede .personalityindex directive");
10746   }
10747   if (UC.cantUnwind()) {
10748     Error(L, ".personalityindex cannot be used with .cantunwind");
10749     UC.emitCantUnwindLocNotes();
10750     return true;
10751   }
10752   if (UC.hasHandlerData()) {
10753     Error(L, ".personalityindex must precede .handlerdata directive");
10754     UC.emitHandlerDataLocNotes();
10755     return true;
10756   }
10757   if (HasExistingPersonality) {
10758     Error(L, "multiple personality directives");
10759     UC.emitPersonalityLocNotes();
10760     return true;
10761   }
10762 
10763   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
10764   if (!CE)
10765     return Error(IndexLoc, "index must be a constant number");
10766   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
10767     return Error(IndexLoc,
10768                  "personality routine index should be in range [0-3]");
10769 
10770   getTargetStreamer().emitPersonalityIndex(CE->getValue());
10771   return false;
10772 }
10773 
10774 /// parseDirectiveUnwindRaw
10775 ///   ::= .unwind_raw offset, opcode [, opcode...]
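/// For example (illustrative; the bytes are raw EHABI unwind opcodes and are
/// believed to encode "pop {r0}"):
///   .unwind_raw 4, 0xb1, 0x01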
10776 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
10777   MCAsmParser &Parser = getParser();
10778   int64_t StackOffset;
10779   const MCExpr *OffsetExpr;
10780   SMLoc OffsetLoc = getLexer().getLoc();
10781 
10782   if (!UC.hasFnStart())
10783     return Error(L, ".fnstart must precede .unwind_raw directives");
10784   if (getParser().parseExpression(OffsetExpr))
10785     return Error(OffsetLoc, "expected expression");
10786 
10787   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10788   if (!CE)
10789     return Error(OffsetLoc, "offset must be a constant");
10790 
10791   StackOffset = CE->getValue();
10792 
10793   if (Parser.parseToken(AsmToken::Comma, "expected comma"))
10794     return true;
10795 
10796   SmallVector<uint8_t, 16> Opcodes;
10797 
10798   auto parseOne = [&]() -> bool {
10799     const MCExpr *OE;
10800     SMLoc OpcodeLoc = getLexer().getLoc();
10801     if (check(getLexer().is(AsmToken::EndOfStatement) ||
10802                   Parser.parseExpression(OE),
10803               OpcodeLoc, "expected opcode expression"))
10804       return true;
10805     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
10806     if (!OC)
10807       return Error(OpcodeLoc, "opcode value must be a constant");
10808     const int64_t Opcode = OC->getValue();
10809     if (Opcode & ~0xff)
10810       return Error(OpcodeLoc, "invalid opcode");
10811     Opcodes.push_back(uint8_t(Opcode));
10812     return false;
10813   };
10814 
10815   // Must have at least 1 element
10816   SMLoc OpcodeLoc = getLexer().getLoc();
10817   if (parseOptionalToken(AsmToken::EndOfStatement))
10818     return Error(OpcodeLoc, "expected opcode expression");
10819   if (parseMany(parseOne))
10820     return true;
10821 
10822   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
10823   return false;
10824 }
10825 
10826 /// parseDirectiveTLSDescSeq
10827 ///   ::= .tlsdescseq tls-variable
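/// For example (illustrative; the symbol name is a placeholder):
///   .tlsdescseq tls_var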
10828 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
10829   MCAsmParser &Parser = getParser();
10830 
10831   if (getLexer().isNot(AsmToken::Identifier))
10832     return TokError("expected variable after '.tlsdescseq' directive");
10833 
10834   const MCSymbolRefExpr *SRE =
10835     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
10836                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10837   Lex();
10838 
10839   if (parseToken(AsmToken::EndOfStatement,
10840                  "unexpected token in '.tlsdescseq' directive"))
10841     return true;
10842 
10843   getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10844   return false;
10845 }
10846 
10847 /// parseDirectiveMovSP
10848 ///  ::= .movsp reg [, #offset]
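/// For example (illustrative; understood to tell the unwinder that the named
/// register now holds the value of sp, plus an optional offset):
///   .movsp r7
///   .movsp r7, #8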
10849 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10850   MCAsmParser &Parser = getParser();
10851   if (!UC.hasFnStart())
10852     return Error(L, ".fnstart must precede .movsp directives");
10853   if (UC.getFPReg() != ARM::SP)
10854     return Error(L, "unexpected .movsp directive");
10855 
10856   SMLoc SPRegLoc = Parser.getTok().getLoc();
10857   int SPReg = tryParseRegister();
10858   if (SPReg == -1)
10859     return Error(SPRegLoc, "register expected");
10860   if (SPReg == ARM::SP || SPReg == ARM::PC)
10861     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10862 
10863   int64_t Offset = 0;
10864   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10865     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
10866       return true;
10867 
10868     const MCExpr *OffsetExpr;
10869     SMLoc OffsetLoc = Parser.getTok().getLoc();
10870 
10871     if (Parser.parseExpression(OffsetExpr))
10872       return Error(OffsetLoc, "malformed offset expression");
10873 
10874     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10875     if (!CE)
10876       return Error(OffsetLoc, "offset must be an immediate constant");
10877 
10878     Offset = CE->getValue();
10879   }
10880 
10881   if (parseToken(AsmToken::EndOfStatement,
10882                  "unexpected token in '.movsp' directive"))
10883     return true;
10884 
10885   getTargetStreamer().emitMovSP(SPReg, Offset);
10886   UC.saveFPReg(SPReg);
10887 
10888   return false;
10889 }
10890 
10891 /// parseDirectiveObjectArch
10892 ///   ::= .object_arch name
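/// For example (illustrative; records the given architecture in the object
/// file's build attributes):
///   .object_arch armv4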
10893 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10894   MCAsmParser &Parser = getParser();
10895   if (getLexer().isNot(AsmToken::Identifier))
10896     return Error(getLexer().getLoc(), "unexpected token");
10897 
10898   StringRef Arch = Parser.getTok().getString();
10899   SMLoc ArchLoc = Parser.getTok().getLoc();
10900   Lex();
10901 
10902   ARM::ArchKind ID = ARM::parseArch(Arch);
10903 
10904   if (ID == ARM::ArchKind::INVALID)
10905     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
10906   if (parseToken(AsmToken::EndOfStatement))
10907     return true;
10908 
10909   getTargetStreamer().emitObjectArch(ID);
10910   return false;
10911 }
10912 
10913 /// parseDirectiveAlign
10914 ///   ::= .align
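/// With no operand, the target-specific handling below aligns to a 4-byte
/// (2**2) boundary; any other form is left to the generic directive parser.
///   .align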
10915 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
10916   // NOTE: if this is not the end of the statement, fall back to the
10917   // target-agnostic handling for this directive, which will handle it correctly.
10918   if (parseOptionalToken(AsmToken::EndOfStatement)) {
10919     // '.align' with no operand is handled target-specifically to mean 2**2 byte alignment.
10920     const MCSection *Section = getStreamer().getCurrentSectionOnly();
10921     assert(Section && "must have section to emit alignment");
10922     if (Section->UseCodeAlign())
10923       getStreamer().EmitCodeAlignment(4, 0);
10924     else
10925       getStreamer().EmitValueToAlignment(4, 0, 1, 0);
10926     return false;
10927   }
10928   return true;
10929 }
10930 
10931 /// parseDirectiveThumbSet
10932 ///  ::= .thumb_set name, value
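/// For example (illustrative; names are placeholders; behaves like .set but
/// also marks the symbol as a Thumb function):
///   .thumb_set alias_func, thumb_func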
10933 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
10934   MCAsmParser &Parser = getParser();
10935 
10936   StringRef Name;
10937   if (check(Parser.parseIdentifier(Name),
10938             "expected identifier after '.thumb_set'") ||
10939       parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
10940     return true;
10941 
10942   MCSymbol *Sym;
10943   const MCExpr *Value;
10944   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
10945                                                Parser, Sym, Value))
10946     return true;
10947 
10948   getTargetStreamer().emitThumbSet(Sym, Value);
10949   return false;
10950 }
10951 
10952 /// Force static initialization.
10953 extern "C" void LLVMInitializeARMAsmParser() {
10954   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
10955   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
10956   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
10957   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
10958 }
10959 
10960 #define GET_REGISTER_MATCHER
10961 #define GET_SUBTARGET_FEATURE_NAME
10962 #define GET_MATCHER_IMPLEMENTATION
10963 #define GET_MNEMONIC_SPELL_CHECKER
10964 #include "ARMGenAsmMatcher.inc"
10965 
10966 // Some diagnostics need to vary with subtarget features, so they are handled
10967 // here. For example, the DPR class has either 16 or 32 registers, depending
10968 // on the FPU available.
10969 const char *
10970 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
10971   switch (MatchError) {
10972   // rGPR contains sp starting with ARMv8.
10973   case Match_rGPR:
10974     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
10975                       : "operand must be a register in range [r0, r12] or r14";
10976   // DPR contains 16 registers for some FPUs, and 32 for others.
10977   case Match_DPR:
10978     return hasD32() ? "operand must be a register in range [d0, d31]"
10979                     : "operand must be a register in range [d0, d15]";
10980   case Match_DPR_RegList:
10981     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
10982                     : "operand must be a list of registers in range [d0, d15]";
10983 
10984   // For all other diags, use the static string from tablegen.
10985   default:
10986     return getMatchKindDiag(MatchError);
10987   }
10988 }
10989 
10990 // Process the list of near-misses, throwing away ones we don't want to report
10991 // to the user, and converting the rest to a source location and string that
10992 // should be reported.
10993 void
10994 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
10995                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
10996                                SMLoc IDLoc, OperandVector &Operands) {
10997   // TODO: If operand didn't match, sub in a dummy one and run target
10998   // predicate, so that we can avoid reporting near-misses that are invalid?
10999   // TODO: Many operand types don't have SuperClasses set, so we report
11000   // redundant ones.
11001   // TODO: Some operands are superclasses of registers (e.g.
11002   // MCK_RegShiftedImm), we don't have any way to represent that currently.
11003   // TODO: This is not all ARM-specific, can some of it be factored out?
11004 
11005   // Record some information about near-misses that we have already seen, so
11006   // that we can avoid reporting redundant ones. For example, if there are
11007   // variants of an instruction that take 8- and 16-bit immediates, we want
11008   // to only report the widest one.
11009   std::multimap<unsigned, unsigned> OperandMissesSeen;
11010   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
11011   bool ReportedTooFewOperands = false;
11012 
11013   // Process the near-misses in reverse order, so that we see more general
11014   // ones first, and can avoid emitting the more specific ones.
11015   for (NearMissInfo &I : reverse(NearMissesIn)) {
11016     switch (I.getKind()) {
11017     case NearMissInfo::NearMissOperand: {
11018       SMLoc OperandLoc =
11019           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
11020       const char *OperandDiag =
11021           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
11022 
11023       // If we have already emitted a message for a superclass, don't also report
11024       // the sub-class. We consider all operand classes that we don't have a
11025       // specialised diagnostic for to be equal for the purpose of this check,
11026       // so that we don't report the generic error multiple times on the same
11027       // operand.
11028       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
11029       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
11030       if (std::any_of(PrevReports.first, PrevReports.second,
11031                       [DupCheckMatchClass](
11032                           const std::pair<unsigned, unsigned> Pair) {
11033             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
11034               return Pair.second == DupCheckMatchClass;
11035             else
11036               return isSubclass((MatchClassKind)DupCheckMatchClass,
11037                                 (MatchClassKind)Pair.second);
11038           }))
11039         break;
11040       OperandMissesSeen.insert(
11041           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
11042 
11043       NearMissMessage Message;
11044       Message.Loc = OperandLoc;
11045       if (OperandDiag) {
11046         Message.Message = OperandDiag;
11047       } else if (I.getOperandClass() == InvalidMatchClass) {
11048         Message.Message = "too many operands for instruction";
11049       } else {
11050         Message.Message = "invalid operand for instruction";
11051         LLVM_DEBUG(
11052             dbgs() << "Missing diagnostic string for operand class "
11053                    << getMatchClassName((MatchClassKind)I.getOperandClass())
11054                    << I.getOperandClass() << ", error " << I.getOperandError()
11055                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
11056       }
11057       NearMissesOut.emplace_back(Message);
11058       break;
11059     }
11060     case NearMissInfo::NearMissFeature: {
11061       const FeatureBitset &MissingFeatures = I.getFeatures();
11062       // Don't report the same set of features twice.
11063       if (FeatureMissesSeen.count(MissingFeatures))
11064         break;
11065       FeatureMissesSeen.insert(MissingFeatures);
11066 
11067       // Special case: don't report a feature set which includes arm-mode for
11068       // targets that don't have ARM mode.
11069       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
11070         break;
11071       // Don't report any near-misses that both require switching instruction
11072       // set, and adding other subtarget features.
11073       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
11074           MissingFeatures.count() > 1)
11075         break;
11076       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
11077           MissingFeatures.count() > 1)
11078         break;
11079       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
11080           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
11081                                              Feature_IsThumbBit})).any())
11082         break;
11083       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
11084         break;
11085 
11086       NearMissMessage Message;
11087       Message.Loc = IDLoc;
11088       raw_svector_ostream OS(Message.Message);
11089 
11090       OS << "instruction requires:";
11091       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
11092         if (MissingFeatures.test(i))
11093           OS << ' ' << getSubtargetFeatureName(i);
11094 
11095       NearMissesOut.emplace_back(Message);
11096 
11097       break;
11098     }
11099     case NearMissInfo::NearMissPredicate: {
11100       NearMissMessage Message;
11101       Message.Loc = IDLoc;
11102       switch (I.getPredicateError()) {
11103       case Match_RequiresNotITBlock:
11104         Message.Message = "flag setting instruction only valid outside IT block";
11105         break;
11106       case Match_RequiresITBlock:
11107         Message.Message = "instruction only valid inside IT block";
11108         break;
11109       case Match_RequiresV6:
11110         Message.Message = "instruction variant requires ARMv6 or later";
11111         break;
11112       case Match_RequiresThumb2:
11113         Message.Message = "instruction variant requires Thumb2";
11114         break;
11115       case Match_RequiresV8:
11116         Message.Message = "instruction variant requires ARMv8 or later";
11117         break;
11118       case Match_RequiresFlagSetting:
11119         Message.Message = "no flag-preserving variant of this instruction available";
11120         break;
11121       case Match_InvalidOperand:
11122         Message.Message = "invalid operand for instruction";
11123         break;
11124       default:
11125         llvm_unreachable("Unhandled target predicate error");
11126         break;
11127       }
11128       NearMissesOut.emplace_back(Message);
11129       break;
11130     }
11131     case NearMissInfo::NearMissTooFewOperands: {
11132       if (!ReportedTooFewOperands) {
11133         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
11134         NearMissesOut.emplace_back(NearMissMessage{
11135             EndLoc, StringRef("too few operands for instruction")});
11136         ReportedTooFewOperands = true;
11137       }
11138       break;
11139     }
11140     case NearMissInfo::NoNearMiss:
11141       // This should never leave the matcher.
11142       llvm_unreachable("not a near-miss");
11143       break;
11144     }
11145   }
11146 }
11147 
11148 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
11149                                     SMLoc IDLoc, OperandVector &Operands) {
11150   SmallVector<NearMissMessage, 4> Messages;
11151   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
11152 
11153   if (Messages.size() == 0) {
11154     // No near-misses were found, so the best we can do is "invalid
11155     // instruction".
11156     Error(IDLoc, "invalid instruction");
11157   } else if (Messages.size() == 1) {
11158     // One near miss was found, report it as the sole error.
11159     Error(Messages[0].Loc, Messages[0].Message);
11160   } else {
11161     // More than one near miss, so report a generic "invalid instruction"
11162     // error, followed by notes for each of the near-misses.
11163     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
11164     for (auto &M : Messages) {
11165       Note(M.Loc, M.Message);
11166     }
11167   }
11168 }
11169 
11170 /// parseDirectiveArchExtension
11171 ///   ::= .arch_extension [no]feature
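/// For example (illustrative):
///   .arch_extension crc     @ enable the CRC extension
///   .arch_extension nocrc   @ disable it again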
11172 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
11173   // FIXME: This structure should be moved inside ARMTargetParser
11174   // when we start to table-generate them, so that we can use the ARM
11175   // feature flags below, which are generated by table-gen.
11176   static const struct {
11177     const unsigned Kind;
11178     const FeatureBitset ArchCheck;
11179     const FeatureBitset Features;
11180   } Extensions[] = {
11181     { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
11182     { ARM::AEK_CRYPTO,  {Feature_HasV8Bit},
11183       {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
11184     { ARM::AEK_FP, {Feature_HasV8Bit},
11185       {ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
11186     { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
11187       {Feature_HasV7Bit, Feature_IsNotMClassBit},
11188       {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
11189     { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
11190       {ARM::FeatureMP} },
11191     { ARM::AEK_SIMD, {Feature_HasV8Bit},
11192       {ARM::FeatureNEON, ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
11193     { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
11194     // FIXME: Only available in A-class, isel not predicated
11195     { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
11196     { ARM::AEK_FP16, {Feature_HasV8_2aBit},
11197       {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
11198     { ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
11199     { ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB} },
11200     // FIXME: Unsupported extensions.
11201     { ARM::AEK_OS, {}, {} },
11202     { ARM::AEK_IWMMXT, {}, {} },
11203     { ARM::AEK_IWMMXT2, {}, {} },
11204     { ARM::AEK_MAVERICK, {}, {} },
11205     { ARM::AEK_XSCALE, {}, {} },
11206   };
11207 
11208   MCAsmParser &Parser = getParser();
11209 
11210   if (getLexer().isNot(AsmToken::Identifier))
11211     return Error(getLexer().getLoc(), "expected architecture extension name");
11212 
11213   StringRef Name = Parser.getTok().getString();
11214   SMLoc ExtLoc = Parser.getTok().getLoc();
11215   Lex();
11216 
11217   if (parseToken(AsmToken::EndOfStatement,
11218                  "unexpected token in '.arch_extension' directive"))
11219     return true;
11220 
11221   bool EnableFeature = true;
11222   if (Name.startswith_lower("no")) {
11223     EnableFeature = false;
11224     Name = Name.substr(2);
11225   }
11226   unsigned FeatureKind = ARM::parseArchExt(Name);
11227   if (FeatureKind == ARM::AEK_INVALID)
11228     return Error(ExtLoc, "unknown architectural extension: " + Name);
11229 
11230   for (const auto &Extension : Extensions) {
11231     if (Extension.Kind != FeatureKind)
11232       continue;
11233 
11234     if (Extension.Features.none())
11235       return Error(ExtLoc, "unsupported architectural extension: " + Name);
11236 
11237     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
11238       return Error(ExtLoc, "architectural extension '" + Name +
11239                                "' is not "
11240                                "allowed for the current base architecture");
11241 
11242     MCSubtargetInfo &STI = copySTI();
11243     if (EnableFeature) {
11244       STI.SetFeatureBitsTransitively(Extension.Features);
11245     } else {
11246       STI.ClearFeatureBitsTransitively(Extension.Features);
11247     }
11248     FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
11249     setAvailableFeatures(Features);
11250     return false;
11251   }
11252 
11253   return Error(ExtLoc, "unknown architectural extension: " + Name);
11254 }
11255 
11256 // Define this matcher function after the auto-generated include so we
11257 // have the match class enum definitions.
11258 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
11259                                                   unsigned Kind) {
11260   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
11261   // If the kind is a token for a literal immediate, check if our asm
11262   // operand matches. This is for InstAliases which have a fixed-value
11263   // immediate in the syntax.
11264   switch (Kind) {
11265   default: break;
11266   case MCK__35_0:
11267     if (Op.isImm())
11268       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
11269         if (CE->getValue() == 0)
11270           return Match_Success;
11271     break;
11272   case MCK__35_8:
11273     if (Op.isImm())
11274       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
11275         if (CE->getValue() == 8)
11276           return Match_Success;
11277     break;
11278   case MCK__35_16:
11279     if (Op.isImm())
11280       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
11281         if (CE->getValue() == 16)
11282           return Match_Success;
11283     break;
11284   case MCK_ModImm:
11285     if (Op.isImm()) {
11286       const MCExpr *SOExpr = Op.getImm();
11287       int64_t Value;
11288       if (!SOExpr->evaluateAsAbsolute(Value))
11289         return Match_Success;
11290       assert((Value >= std::numeric_limits<int32_t>::min() &&
11291               Value <= std::numeric_limits<uint32_t>::max()) &&
11292              "expression value must be representable in 32 bits");
11293     }
11294     break;
11295   case MCK_rGPR:
11296     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
11297       return Match_Success;
11298     return Match_rGPR;
11299   case MCK_GPRPair:
11300     if (Op.isReg() &&
11301         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
11302       return Match_Success;
11303     break;
11304   }
11305   return Match_InvalidOperand;
11306 }
11307 
11308 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
11309                                            StringRef ExtraToken) {
11310   if (!hasMVE())
11311     return false;
11312 
11313   return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") ||
11314          Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") ||
11315          Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") ||
11316          Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") ||
11317          Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") ||
11318          Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") ||
11319          Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") ||
11320          Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") ||
11321          Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") ||
11322          Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") ||
11323          Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") ||
11324          Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") ||
11325          Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") ||
11326          Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") ||
11327          Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") ||
11328          Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") ||
11329          Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") ||
11330          Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") ||
11331          Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") ||
11332          Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") ||
11333          Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") ||
11334          Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") ||
11335          Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") ||
11336          Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") ||
11337          Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") ||
11338          Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") ||
11339          Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") ||
11340          Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") ||
11341          Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") ||
11342          Mnemonic.startswith("vqabs") ||
11343          (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
11344          Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") ||
11345          Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") ||
11346          Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") ||
11347          Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") ||
11348          Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") ||
11349          Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") ||
11350          Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") ||
11351          Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") ||
11352          Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") ||
11353          Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") ||
11354          Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") ||
11355          Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") ||
11356          Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") ||
11357          Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") ||
11358          Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") ||
11359          Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") ||
11360          Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") ||
11361          Mnemonic.startswith("vldrb") ||
11362          (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") ||
11363          (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
11364          Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") ||
11365          Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") ||
11366          Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") ||
11367          Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") ||
11368          Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") ||
11369          Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") ||
11370          Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") ||
11371          Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") ||
11372          Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") ||
11373          Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") ||
11374          Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") ||
11375          Mnemonic.startswith("vcvt") ||
11376          (Mnemonic.startswith("vmov") &&
11377           !(ExtraToken == ".f16" || ExtraToken == ".32" ||
11378             ExtraToken == ".16" || ExtraToken == ".8"));
11379 }
11380