1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMFeatures.h"
10 #include "Utils/ARMBaseInfo.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMInstPrinter.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "TargetInfo/ARMTargetInfo.h"
17 #include "llvm/ADT/APFloat.h"
18 #include "llvm/ADT/APInt.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallSet.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/StringMap.h"
24 #include "llvm/ADT/StringRef.h"
25 #include "llvm/ADT/StringSwitch.h"
26 #include "llvm/ADT/Triple.h"
27 #include "llvm/ADT/Twine.h"
28 #include "llvm/MC/MCContext.h"
29 #include "llvm/MC/MCExpr.h"
30 #include "llvm/MC/MCInst.h"
31 #include "llvm/MC/MCInstrDesc.h"
32 #include "llvm/MC/MCInstrInfo.h"
33 #include "llvm/MC/MCObjectFileInfo.h"
34 #include "llvm/MC/MCParser/MCAsmLexer.h"
35 #include "llvm/MC/MCParser/MCAsmParser.h"
36 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
37 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
38 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
39 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
40 #include "llvm/MC/MCRegisterInfo.h"
41 #include "llvm/MC/MCSection.h"
42 #include "llvm/MC/MCStreamer.h"
43 #include "llvm/MC/MCSubtargetInfo.h"
44 #include "llvm/MC/MCSymbol.h"
45 #include "llvm/MC/SubtargetFeature.h"
46 #include "llvm/Support/ARMBuildAttributes.h"
47 #include "llvm/Support/ARMEHABI.h"
48 #include "llvm/Support/Casting.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Compiler.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/SMLoc.h"
54 #include "llvm/Support/TargetParser.h"
55 #include "llvm/Support/TargetRegistry.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include <algorithm>
58 #include <cassert>
59 #include <cstddef>
60 #include <cstdint>
61 #include <iterator>
62 #include <limits>
63 #include <memory>
64 #include <string>
65 #include <utility>
66 #include <vector>
67 
68 #define DEBUG_TYPE "asm-parser"
69 
70 using namespace llvm;
71 
72 namespace llvm {
73 extern const MCInstrDesc ARMInsts[];
74 } // end namespace llvm
75 
76 namespace {
77 
78 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
79 
80 static cl::opt<ImplicitItModeTy> ImplicitItMode(
81     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
82     cl::desc("Allow conditional instructions outdside of an IT block"),
83     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
84                           "Accept in both ISAs, emit implicit ITs in Thumb"),
85                clEnumValN(ImplicitItModeTy::Never, "never",
86                           "Warn in ARM, reject in Thumb"),
87                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
88                           "Accept in ARM, reject in Thumb"),
89                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
90                           "Warn in ARM, emit implicit ITs in Thumb")));
91 
92 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
93                                         cl::init(false));
94 
95 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
96 
97 static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) {
98   // Position==0 means we're not in an IT block at all. Position==1
99   // means we want the first state bit, which is always 0 (Then).
100   // Position==2 means we want the second state bit, stored at bit 3
101   // of Mask, and so on downwards. So (5 - Position) will shift the
102   // right bit down to bit 0, including the always-0 bit at bit 4 for
103   // the mandatory initial Then.
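  // Worked example: for an "ITET" block the internal mask is 0b1010, where
  // bit 3 holds the second slot's state (1 meaning else), bit 2 the third
  // slot's (0 meaning then), and the trailing 1 at bit 1 terminates the
  // block. Hence extractITMaskBit(0b1010, 2) == 1 (else) and
  // extractITMaskBit(0b1010, 3) == 0 (then).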
104   return (Mask >> (5 - Position) & 1);
105 }
106 
107 class UnwindContext {
108   using Locs = SmallVector<SMLoc, 4>;
109 
110   MCAsmParser &Parser;
111   Locs FnStartLocs;
112   Locs CantUnwindLocs;
113   Locs PersonalityLocs;
114   Locs PersonalityIndexLocs;
115   Locs HandlerDataLocs;
116   int FPReg;
117 
118 public:
119   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
120 
121   bool hasFnStart() const { return !FnStartLocs.empty(); }
122   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
123   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
124 
125   bool hasPersonality() const {
126     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
127   }
128 
129   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
130   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
131   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
132   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
133   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
134 
135   void saveFPReg(int Reg) { FPReg = Reg; }
136   int getFPReg() const { return FPReg; }
137 
  void emitFnStartLocNotes() const {
    for (const SMLoc &Loc : FnStartLocs)
      Parser.Note(Loc, ".fnstart was specified here");
  }

  void emitCantUnwindLocNotes() const {
    for (const SMLoc &Loc : CantUnwindLocs)
      Parser.Note(Loc, ".cantunwind was specified here");
  }

  void emitHandlerDataLocNotes() const {
    for (const SMLoc &Loc : HandlerDataLocs)
      Parser.Note(Loc, ".handlerdata was specified here");
  }
155 
156   void emitPersonalityLocNotes() const {
157     for (Locs::const_iterator PI = PersonalityLocs.begin(),
158                               PE = PersonalityLocs.end(),
159                               PII = PersonalityIndexLocs.begin(),
160                               PIE = PersonalityIndexLocs.end();
161          PI != PE || PII != PIE;) {
162       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
163         Parser.Note(*PI++, ".personality was specified here");
164       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
165         Parser.Note(*PII++, ".personalityindex was specified here");
166       else
167         llvm_unreachable(".personality and .personalityindex cannot be "
168                          "at the same location");
169     }
170   }
171 
172   void reset() {
173     FnStartLocs = Locs();
174     CantUnwindLocs = Locs();
175     PersonalityLocs = Locs();
176     HandlerDataLocs = Locs();
177     PersonalityIndexLocs = Locs();
178     FPReg = ARM::SP;
179   }
180 };
181 
182 
183 class ARMAsmParser : public MCTargetAsmParser {
184   const MCRegisterInfo *MRI;
185   UnwindContext UC;
186 
187   ARMTargetStreamer &getTargetStreamer() {
188     assert(getParser().getStreamer().getTargetStreamer() &&
189            "do not have a target streamer");
190     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
191     return static_cast<ARMTargetStreamer &>(TS);
192   }
193 
  // Map of register aliases registered via the .req directive.
195   StringMap<unsigned> RegisterReqs;
196 
197   bool NextSymbolIsThumb;
198 
199   bool useImplicitITThumb() const {
200     return ImplicitItMode == ImplicitItModeTy::Always ||
201            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
202   }
203 
204   bool useImplicitITARM() const {
205     return ImplicitItMode == ImplicitItModeTy::Always ||
206            ImplicitItMode == ImplicitItModeTy::ARMOnly;
207   }
208 
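  // For example, per the encoding described in the comments below, "ITTE EQ"
  // (three conditional instructions: then, then, else) is held as
  // Cond == ARMCC::EQ and Mask == 0b0110: bit 3 == 0 for the second slot
  // (then), bit 2 == 1 for the third slot (else), and the trailing 1 at
  // bit 1 gives a count of 4 - countTrailingZeros(Mask) == 3.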
209   struct {
210     ARMCC::CondCodes Cond;    // Condition for IT block.
211     unsigned Mask:4;          // Condition mask for instructions.
212                               // Starting at first 1 (from lsb).
213                               //   '1'  condition as indicated in IT.
214                               //   '0'  inverse of condition (else).
215                               // Count of instructions in IT block is
216                               // 4 - trailingzeroes(mask)
217                               // Note that this does not have the same encoding
218                               // as in the IT instruction, which also depends
219                               // on the low bit of the condition code.
220 
221     unsigned CurPosition;     // Current position in parsing of IT
222                               // block. In range [0,4], with 0 being the IT
223                               // instruction itself. Initialized according to
224                               // count of instructions in block.  ~0U if no
225                               // active IT block.
226 
227     bool IsExplicit;          // true  - The IT instruction was present in the
228                               //         input, we should not modify it.
229                               // false - The IT instruction was added
230                               //         implicitly, we can extend it if that
231                               //         would be legal.
232   } ITState;
233 
234   SmallVector<MCInst, 4> PendingConditionalInsts;
235 
236   void flushPendingInstructions(MCStreamer &Out) override {
237     if (!inImplicitITBlock()) {
238       assert(PendingConditionalInsts.size() == 0);
239       return;
240     }
241 
242     // Emit the IT instruction
243     MCInst ITInst;
244     ITInst.setOpcode(ARM::t2IT);
245     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
246     ITInst.addOperand(MCOperand::createImm(ITState.Mask));
247     Out.EmitInstruction(ITInst, getSTI());
248 
    // Emit the conditional instructions
250     assert(PendingConditionalInsts.size() <= 4);
251     for (const MCInst &Inst : PendingConditionalInsts) {
252       Out.EmitInstruction(Inst, getSTI());
253     }
254     PendingConditionalInsts.clear();
255 
256     // Clear the IT state
257     ITState.Mask = 0;
258     ITState.CurPosition = ~0U;
259   }
260 
261   bool inITBlock() { return ITState.CurPosition != ~0U; }
262   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
263   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
264 
265   bool lastInITBlock() {
266     return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
267   }
268 
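  // For example, in an explicit three-slot block (Mask == 0b1010, so TZ == 1)
  // the conditional instructions occupy positions 1..3; forwardITPosition()
  // closes the block once CurPosition has been advanced to 4 == 5 - TZ.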
269   void forwardITPosition() {
270     if (!inITBlock()) return;
271     // Move to the next instruction in the IT block, if there is one. If not,
272     // mark the block as done, except for implicit IT blocks, which we leave
273     // open until we find an instruction that can't be added to it.
274     unsigned TZ = countTrailingZeros(ITState.Mask);
275     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
276       ITState.CurPosition = ~0U; // Done with the IT block after this.
277   }
278 
279   // Rewind the state of the current IT block, removing the last slot from it.
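  // For illustration: rewinding a three-slot block with Mask == 0b1010 gives
  // TZ == 1; the surviving state bits are Mask & (0xC << 1) == 0b1000, the
  // terminating 1 is re-inserted one position higher (0x2 << 1), and the
  // result is Mask == 0b1100, a two-slot then/else block.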
280   void rewindImplicitITPosition() {
281     assert(inImplicitITBlock());
282     assert(ITState.CurPosition > 1);
283     ITState.CurPosition--;
284     unsigned TZ = countTrailingZeros(ITState.Mask);
285     unsigned NewMask = 0;
286     NewMask |= ITState.Mask & (0xC << TZ);
287     NewMask |= 0x2 << TZ;
288     ITState.Mask = NewMask;
289   }
290 
291   // Rewind the state of the current IT block, removing the last slot from it.
292   // If we were at the first slot, this closes the IT block.
293   void discardImplicitITBlock() {
294     assert(inImplicitITBlock());
295     assert(ITState.CurPosition == 1);
296     ITState.CurPosition = ~0U;
297   }
298 
299   // Return the low-subreg of a given Q register.
300   unsigned getDRegFromQReg(unsigned QReg) const {
301     return MRI->getSubReg(QReg, ARM::dsub_0);
302   }
303 
304   // Get the condition code corresponding to the current IT block slot.
305   ARMCC::CondCodes currentITCond() {
306     unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition);
307     return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond;
308   }
309 
310   // Invert the condition of the current IT block slot without changing any
311   // other slots in the same block.
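  // For example, with CurPosition == 2 this toggles bit 3 (1 << (5 - 2)),
  // which holds the second slot's then/else state; the first slot has no
  // mask bit, so its inversion flips the block's base condition instead.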
312   void invertCurrentITCondition() {
313     if (ITState.CurPosition == 1) {
314       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
315     } else {
316       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
317     }
318   }
319 
320   // Returns true if the current IT block is full (all 4 slots used).
321   bool isITBlockFull() {
322     return inITBlock() && (ITState.Mask & 1);
323   }
324 
325   // Extend the current implicit IT block to have one more slot with the given
326   // condition code.
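  // For illustration: extending a fresh implicit block (Mask == 0b1000, a
  // single 'then' slot) with the opposite condition gives TZ == 3; no state
  // bits survive (0b1000 & (0xE << 3) == 0), the new slot's 'else' state is
  // recorded at bit 3, and the terminating 1 moves down to bit 2, giving
  // Mask == 0b1100.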
327   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
328     assert(inImplicitITBlock());
329     assert(!isITBlockFull());
330     assert(Cond == ITState.Cond ||
331            Cond == ARMCC::getOppositeCondition(ITState.Cond));
332     unsigned TZ = countTrailingZeros(ITState.Mask);
333     unsigned NewMask = 0;
334     // Keep any existing condition bits.
335     NewMask |= ITState.Mask & (0xE << TZ);
336     // Insert the new condition bit.
337     NewMask |= (Cond != ITState.Cond) << TZ;
338     // Move the trailing 1 down one bit.
339     NewMask |= 1 << (TZ - 1);
340     ITState.Mask = NewMask;
341   }
342 
343   // Create a new implicit IT block with a dummy condition code.
344   void startImplicitITBlock() {
345     assert(!inITBlock());
346     ITState.Cond = ARMCC::AL;
347     ITState.Mask = 8;
348     ITState.CurPosition = 1;
349     ITState.IsExplicit = false;
350   }
351 
352   // Create a new explicit IT block with the given condition and mask.
353   // The mask should be in the format used in ARMOperand and
354   // MCOperand, with a 1 implying 'e', regardless of the low bit of
355   // the condition.
356   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
357     assert(!inITBlock());
358     ITState.Cond = Cond;
359     ITState.Mask = Mask;
360     ITState.CurPosition = 0;
361     ITState.IsExplicit = true;
362   }
363 
364   struct {
365     unsigned Mask : 4;
366     unsigned CurPosition;
367   } VPTState;
368   bool inVPTBlock() { return VPTState.CurPosition != ~0U; }
369   void forwardVPTPosition() {
370     if (!inVPTBlock()) return;
371     unsigned TZ = countTrailingZeros(VPTState.Mask);
372     if (++VPTState.CurPosition == 5 - TZ)
373       VPTState.CurPosition = ~0U;
374   }
375 
376   void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
377     return getParser().Note(L, Msg, Range);
378   }
379 
380   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
381     return getParser().Warning(L, Msg, Range);
382   }
383 
384   bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
385     return getParser().Error(L, Msg, Range);
386   }
387 
388   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
389                            unsigned ListNo, bool IsARPop = false);
390   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
391                            unsigned ListNo);
392 
393   int tryParseRegister();
394   bool tryParseRegisterWithWriteBack(OperandVector &);
395   int tryParseShiftRegister(OperandVector &);
396   bool parseRegisterList(OperandVector &, bool EnforceOrder = true);
397   bool parseMemory(OperandVector &);
398   bool parseOperand(OperandVector &, StringRef Mnemonic);
399   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
400   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
401                               unsigned &ShiftAmount);
402   bool parseLiteralValues(unsigned Size, SMLoc L);
403   bool parseDirectiveThumb(SMLoc L);
404   bool parseDirectiveARM(SMLoc L);
405   bool parseDirectiveThumbFunc(SMLoc L);
406   bool parseDirectiveCode(SMLoc L);
407   bool parseDirectiveSyntax(SMLoc L);
408   bool parseDirectiveReq(StringRef Name, SMLoc L);
409   bool parseDirectiveUnreq(SMLoc L);
410   bool parseDirectiveArch(SMLoc L);
411   bool parseDirectiveEabiAttr(SMLoc L);
412   bool parseDirectiveCPU(SMLoc L);
413   bool parseDirectiveFPU(SMLoc L);
414   bool parseDirectiveFnStart(SMLoc L);
415   bool parseDirectiveFnEnd(SMLoc L);
416   bool parseDirectiveCantUnwind(SMLoc L);
417   bool parseDirectivePersonality(SMLoc L);
418   bool parseDirectiveHandlerData(SMLoc L);
419   bool parseDirectiveSetFP(SMLoc L);
420   bool parseDirectivePad(SMLoc L);
421   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
422   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
423   bool parseDirectiveLtorg(SMLoc L);
424   bool parseDirectiveEven(SMLoc L);
425   bool parseDirectivePersonalityIndex(SMLoc L);
426   bool parseDirectiveUnwindRaw(SMLoc L);
427   bool parseDirectiveTLSDescSeq(SMLoc L);
428   bool parseDirectiveMovSP(SMLoc L);
429   bool parseDirectiveObjectArch(SMLoc L);
430   bool parseDirectiveArchExtension(SMLoc L);
431   bool parseDirectiveAlign(SMLoc L);
432   bool parseDirectiveThumbSet(SMLoc L);
433 
434   bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken);
435   StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken,
436                           unsigned &PredicationCode,
437                           unsigned &VPTPredicationCode, bool &CarrySetting,
438                           unsigned &ProcessorIMod, StringRef &ITMask);
439   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken,
440                              StringRef FullInst, bool &CanAcceptCarrySet,
441                              bool &CanAcceptPredicationCode,
442                              bool &CanAcceptVPTPredicationCode);
443 
444   void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
445                                      OperandVector &Operands);
446   bool isThumb() const {
447     // FIXME: Can tablegen auto-generate this?
448     return getSTI().getFeatureBits()[ARM::ModeThumb];
449   }
450 
451   bool isThumbOne() const {
452     return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
453   }
454 
455   bool isThumbTwo() const {
456     return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
457   }
458 
459   bool hasThumb() const {
460     return getSTI().getFeatureBits()[ARM::HasV4TOps];
461   }
462 
463   bool hasThumb2() const {
464     return getSTI().getFeatureBits()[ARM::FeatureThumb2];
465   }
466 
467   bool hasV6Ops() const {
468     return getSTI().getFeatureBits()[ARM::HasV6Ops];
469   }
470 
471   bool hasV6T2Ops() const {
472     return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
473   }
474 
475   bool hasV6MOps() const {
476     return getSTI().getFeatureBits()[ARM::HasV6MOps];
477   }
478 
479   bool hasV7Ops() const {
480     return getSTI().getFeatureBits()[ARM::HasV7Ops];
481   }
482 
483   bool hasV8Ops() const {
484     return getSTI().getFeatureBits()[ARM::HasV8Ops];
485   }
486 
487   bool hasV8MBaseline() const {
488     return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
489   }
490 
491   bool hasV8MMainline() const {
492     return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
493   }
494   bool hasV8_1MMainline() const {
495     return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps];
496   }
497   bool hasMVE() const {
498     return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps];
499   }
500   bool hasMVEFloat() const {
501     return getSTI().getFeatureBits()[ARM::HasMVEFloatOps];
502   }
503   bool has8MSecExt() const {
504     return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
505   }
506 
507   bool hasARM() const {
508     return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
509   }
510 
511   bool hasDSP() const {
512     return getSTI().getFeatureBits()[ARM::FeatureDSP];
513   }
514 
515   bool hasD32() const {
516     return getSTI().getFeatureBits()[ARM::FeatureD32];
517   }
518 
519   bool hasV8_1aOps() const {
520     return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
521   }
522 
523   bool hasRAS() const {
524     return getSTI().getFeatureBits()[ARM::FeatureRAS];
525   }
526 
527   void SwitchMode() {
528     MCSubtargetInfo &STI = copySTI();
529     auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
530     setAvailableFeatures(FB);
531   }
532 
533   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
534 
535   bool isMClass() const {
536     return getSTI().getFeatureBits()[ARM::FeatureMClass];
537   }
538 
539   /// @name Auto-generated Match Functions
540   /// {
541 
542 #define GET_ASSEMBLER_HEADER
543 #include "ARMGenAsmMatcher.inc"
544 
545   /// }
546 
547   OperandMatchResultTy parseITCondCode(OperandVector &);
548   OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
549   OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
550   OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
551   OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
552   OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
553   OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
554   OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
555   OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
556   OperandMatchResultTy parseBankedRegOperand(OperandVector &);
557   OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
558                                    int High);
559   OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
560     return parsePKHImm(O, "lsl", 0, 31);
561   }
562   OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
563     return parsePKHImm(O, "asr", 1, 32);
564   }
565   OperandMatchResultTy parseSetEndImm(OperandVector &);
566   OperandMatchResultTy parseShifterImm(OperandVector &);
567   OperandMatchResultTy parseRotImm(OperandVector &);
568   OperandMatchResultTy parseModImm(OperandVector &);
569   OperandMatchResultTy parseBitfield(OperandVector &);
570   OperandMatchResultTy parsePostIdxReg(OperandVector &);
571   OperandMatchResultTy parseAM3Offset(OperandVector &);
572   OperandMatchResultTy parseFPImm(OperandVector &);
573   OperandMatchResultTy parseVectorList(OperandVector &);
574   OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
575                                        SMLoc &EndLoc);
576 
577   // Asm Match Converter Methods
578   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
579   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
580 
581   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
                          MCStreamer &Out);
583   bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
584   bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitVectorPredicateOperand(StringRef Mnemonic,
                                        OperandVector &Operands);
586   bool isITBlockTerminator(MCInst &Inst) const;
587   void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
588   bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
589                         bool Load, bool ARMMode, bool Writeback);
590 
591 public:
592   enum ARMMatchResultTy {
593     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
594     Match_RequiresNotITBlock,
595     Match_RequiresV6,
596     Match_RequiresThumb2,
597     Match_RequiresV8,
598     Match_RequiresFlagSetting,
599 #define GET_OPERAND_DIAGNOSTIC_TYPES
600 #include "ARMGenAsmMatcher.inc"
601 
602   };
603 
604   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
605                const MCInstrInfo &MII, const MCTargetOptions &Options)
606     : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
607     MCAsmParserExtension::Initialize(Parser);
608 
609     // Cache the MCRegisterInfo.
610     MRI = getContext().getRegisterInfo();
611 
612     // Initialize the set of available features.
613     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
614 
615     // Add build attributes based on the selected target.
616     if (AddBuildAttributes)
617       getTargetStreamer().emitTargetAttributes(STI);
618 
619     // Not in an ITBlock to start with.
620     ITState.CurPosition = ~0U;
621 
622     VPTState.CurPosition = ~0U;
623 
624     NextSymbolIsThumb = false;
625   }
626 
627   // Implementation of the MCTargetAsmParser interface:
628   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
629   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
630                         SMLoc NameLoc, OperandVector &Operands) override;
631   bool ParseDirective(AsmToken DirectiveID) override;
632 
633   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
634                                       unsigned Kind) override;
635   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
636 
637   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
638                                OperandVector &Operands, MCStreamer &Out,
639                                uint64_t &ErrorInfo,
640                                bool MatchingInlineAsm) override;
641   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
642                             SmallVectorImpl<NearMissInfo> &NearMisses,
643                             bool MatchingInlineAsm, bool &EmitInITBlock,
644                             MCStreamer &Out);
645 
646   struct NearMissMessage {
647     SMLoc Loc;
648     SmallString<128> Message;
649   };
650 
651   const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
652 
653   void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
654                         SmallVectorImpl<NearMissMessage> &NearMissesOut,
655                         SMLoc IDLoc, OperandVector &Operands);
656   void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
657                         OperandVector &Operands);
658 
659   void doBeforeLabelEmit(MCSymbol *Symbol) override;
660 
661   void onLabelParsed(MCSymbol *Symbol) override;
662 };
663 
664 /// ARMOperand - Instances of this class represent a parsed ARM machine
665 /// operand.
666 class ARMOperand : public MCParsedAsmOperand {
667   enum KindTy {
668     k_CondCode,
669     k_VPTPred,
670     k_CCOut,
671     k_ITCondMask,
672     k_CoprocNum,
673     k_CoprocReg,
674     k_CoprocOption,
675     k_Immediate,
676     k_MemBarrierOpt,
677     k_InstSyncBarrierOpt,
678     k_TraceSyncBarrierOpt,
679     k_Memory,
680     k_PostIndexRegister,
681     k_MSRMask,
682     k_BankedReg,
683     k_ProcIFlags,
684     k_VectorIndex,
685     k_Register,
686     k_RegisterList,
687     k_RegisterListWithAPSR,
688     k_DPRRegisterList,
689     k_SPRRegisterList,
690     k_FPSRegisterListWithVPR,
691     k_FPDRegisterListWithVPR,
692     k_VectorList,
693     k_VectorListAllLanes,
694     k_VectorListIndexed,
695     k_ShiftedRegister,
696     k_ShiftedImmediate,
697     k_ShifterImmediate,
698     k_RotateImmediate,
699     k_ModifiedImmediate,
700     k_ConstantPoolImmediate,
701     k_BitfieldDescriptor,
702     k_Token,
703   } Kind;
704 
705   SMLoc StartLoc, EndLoc, AlignmentLoc;
706   SmallVector<unsigned, 8> Registers;
707 
708   struct CCOp {
709     ARMCC::CondCodes Val;
710   };
711 
712   struct VCCOp {
713     ARMVCC::VPTCodes Val;
714   };
715 
716   struct CopOp {
717     unsigned Val;
718   };
719 
720   struct CoprocOptionOp {
721     unsigned Val;
722   };
723 
724   struct ITMaskOp {
725     unsigned Mask:4;
726   };
727 
728   struct MBOptOp {
729     ARM_MB::MemBOpt Val;
730   };
731 
732   struct ISBOptOp {
733     ARM_ISB::InstSyncBOpt Val;
734   };
735 
736   struct TSBOptOp {
737     ARM_TSB::TraceSyncBOpt Val;
738   };
739 
740   struct IFlagsOp {
741     ARM_PROC::IFlags Val;
742   };
743 
744   struct MMaskOp {
745     unsigned Val;
746   };
747 
748   struct BankedRegOp {
749     unsigned Val;
750   };
751 
752   struct TokOp {
753     const char *Data;
754     unsigned Length;
755   };
756 
757   struct RegOp {
758     unsigned RegNum;
759   };
760 
761   // A vector register list is a sequential list of 1 to 4 registers.
762   struct VectorListOp {
763     unsigned RegNum;
764     unsigned Count;
765     unsigned LaneIndex;
766     bool isDoubleSpaced;
767   };
768 
769   struct VectorIndexOp {
770     unsigned Val;
771   };
772 
773   struct ImmOp {
774     const MCExpr *Val;
775   };
776 
777   /// Combined record for all forms of ARM address expressions.
778   struct MemoryOp {
779     unsigned BaseRegNum;
780     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
781     // was specified.
782     const MCConstantExpr *OffsetImm;  // Offset immediate value
783     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
784     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
785     unsigned ShiftImm;        // shift for OffsetReg.
786     unsigned Alignment;       // 0 = no alignment specified
787     // n = alignment in bytes (2, 4, 8, 16, or 32)
788     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
789   };
790 
791   struct PostIdxRegOp {
792     unsigned RegNum;
793     bool isAdd;
794     ARM_AM::ShiftOpc ShiftTy;
795     unsigned ShiftImm;
796   };
797 
798   struct ShifterImmOp {
799     bool isASR;
800     unsigned Imm;
801   };
802 
803   struct RegShiftedRegOp {
804     ARM_AM::ShiftOpc ShiftTy;
805     unsigned SrcReg;
806     unsigned ShiftReg;
807     unsigned ShiftImm;
808   };
809 
810   struct RegShiftedImmOp {
811     ARM_AM::ShiftOpc ShiftTy;
812     unsigned SrcReg;
813     unsigned ShiftImm;
814   };
815 
816   struct RotImmOp {
817     unsigned Imm;
818   };
819 
820   struct ModImmOp {
821     unsigned Bits;
822     unsigned Rot;
823   };
824 
825   struct BitfieldOp {
826     unsigned LSB;
827     unsigned Width;
828   };
829 
830   union {
831     struct CCOp CC;
832     struct VCCOp VCC;
833     struct CopOp Cop;
834     struct CoprocOptionOp CoprocOption;
835     struct MBOptOp MBOpt;
836     struct ISBOptOp ISBOpt;
837     struct TSBOptOp TSBOpt;
838     struct ITMaskOp ITMask;
839     struct IFlagsOp IFlags;
840     struct MMaskOp MMask;
841     struct BankedRegOp BankedReg;
842     struct TokOp Tok;
843     struct RegOp Reg;
844     struct VectorListOp VectorList;
845     struct VectorIndexOp VectorIndex;
846     struct ImmOp Imm;
847     struct MemoryOp Memory;
848     struct PostIdxRegOp PostIdxReg;
849     struct ShifterImmOp ShifterImm;
850     struct RegShiftedRegOp RegShiftedReg;
851     struct RegShiftedImmOp RegShiftedImm;
852     struct RotImmOp RotImm;
853     struct ModImmOp ModImm;
854     struct BitfieldOp Bitfield;
855   };
856 
857 public:
858   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
859 
860   /// getStartLoc - Get the location of the first token of this operand.
861   SMLoc getStartLoc() const override { return StartLoc; }
862 
863   /// getEndLoc - Get the location of the last token of this operand.
864   SMLoc getEndLoc() const override { return EndLoc; }
865 
866   /// getLocRange - Get the range between the first and last token of this
867   /// operand.
868   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
869 
870   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
871   SMLoc getAlignmentLoc() const {
872     assert(Kind == k_Memory && "Invalid access!");
873     return AlignmentLoc;
874   }
875 
876   ARMCC::CondCodes getCondCode() const {
877     assert(Kind == k_CondCode && "Invalid access!");
878     return CC.Val;
879   }
880 
881   ARMVCC::VPTCodes getVPTPred() const {
882     assert(isVPTPred() && "Invalid access!");
883     return VCC.Val;
884   }
885 
886   unsigned getCoproc() const {
887     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
888     return Cop.Val;
889   }
890 
891   StringRef getToken() const {
892     assert(Kind == k_Token && "Invalid access!");
893     return StringRef(Tok.Data, Tok.Length);
894   }
895 
896   unsigned getReg() const override {
897     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
898     return Reg.RegNum;
899   }
900 
901   const SmallVectorImpl<unsigned> &getRegList() const {
902     assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
903             Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
904             Kind == k_FPSRegisterListWithVPR ||
905             Kind == k_FPDRegisterListWithVPR) &&
906            "Invalid access!");
907     return Registers;
908   }
909 
910   const MCExpr *getImm() const {
911     assert(isImm() && "Invalid access!");
912     return Imm.Val;
913   }
914 
915   const MCExpr *getConstantPoolImm() const {
916     assert(isConstantPoolImm() && "Invalid access!");
917     return Imm.Val;
918   }
919 
920   unsigned getVectorIndex() const {
921     assert(Kind == k_VectorIndex && "Invalid access!");
922     return VectorIndex.Val;
923   }
924 
925   ARM_MB::MemBOpt getMemBarrierOpt() const {
926     assert(Kind == k_MemBarrierOpt && "Invalid access!");
927     return MBOpt.Val;
928   }
929 
930   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
931     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
932     return ISBOpt.Val;
933   }
934 
935   ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
936     assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
937     return TSBOpt.Val;
938   }
939 
940   ARM_PROC::IFlags getProcIFlags() const {
941     assert(Kind == k_ProcIFlags && "Invalid access!");
942     return IFlags.Val;
943   }
944 
945   unsigned getMSRMask() const {
946     assert(Kind == k_MSRMask && "Invalid access!");
947     return MMask.Val;
948   }
949 
950   unsigned getBankedReg() const {
951     assert(Kind == k_BankedReg && "Invalid access!");
952     return BankedReg.Val;
953   }
954 
955   bool isCoprocNum() const { return Kind == k_CoprocNum; }
956   bool isCoprocReg() const { return Kind == k_CoprocReg; }
957   bool isCoprocOption() const { return Kind == k_CoprocOption; }
958   bool isCondCode() const { return Kind == k_CondCode; }
959   bool isVPTPred() const { return Kind == k_VPTPred; }
960   bool isCCOut() const { return Kind == k_CCOut; }
961   bool isITMask() const { return Kind == k_ITCondMask; }
962   bool isITCondCode() const { return Kind == k_CondCode; }
963   bool isImm() const override {
964     return Kind == k_Immediate;
965   }
966 
967   bool isARMBranchTarget() const {
968     if (!isImm()) return false;
969 
970     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
971       return CE->getValue() % 4 == 0;
972     return true;
973   }
974 
975 
976   bool isThumbBranchTarget() const {
977     if (!isImm()) return false;
978 
979     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
980       return CE->getValue() % 2 == 0;
981     return true;
982   }
983 
  // Checks whether this operand is an unsigned offset which fits in a field
  // of the specified width and is scaled by a specific number of bits.
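  // For example, an instantiation with width == 8 and scale == 2 accepts any
  // multiple of 4 in [0, 1020] (Align == 4, Max == 4 * 255).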
986   template<unsigned width, unsigned scale>
987   bool isUnsignedOffset() const {
988     if (!isImm()) return false;
989     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
990     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
991       int64_t Val = CE->getValue();
992       int64_t Align = 1LL << scale;
993       int64_t Max = Align * ((1LL << width) - 1);
994       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
995     }
996     return false;
997   }
998 
  // Checks whether this operand is a signed offset which fits in a field
  // of the specified width and is scaled by a specific number of bits.
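  // For example, an instantiation with width == 8 and scale == 2 accepts any
  // multiple of 4 in [-512, 508] (Min == -4 * 128, Max == 4 * 127).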
1001   template<unsigned width, unsigned scale>
1002   bool isSignedOffset() const {
1003     if (!isImm()) return false;
1004     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1005     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1006       int64_t Val = CE->getValue();
1007       int64_t Align = 1LL << scale;
1008       int64_t Max = Align * ((1LL << (width-1)) - 1);
1009       int64_t Min = -Align * (1LL << (width-1));
1010       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
1011     }
1012     return false;
1013   }
1014 
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
  // relocatable expression...
1019   bool isThumbMemPC() const {
1020     int64_t Val = 0;
1021     if (isImm()) {
1022       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
1023       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
1024       if (!CE) return false;
1025       Val = CE->getValue();
1026     }
1027     else if (isMem()) {
      if (!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if (Memory.BaseRegNum != ARM::PC) return false;
1030       Val = Memory.OffsetImm->getValue();
1031     }
1032     else return false;
1033     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
1034   }
1035 
1036   bool isFPImm() const {
1037     if (!isImm()) return false;
1038     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1039     if (!CE) return false;
1040     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1041     return Val != -1;
1042   }
1043 
1044   template<int64_t N, int64_t M>
1045   bool isImmediate() const {
1046     if (!isImm()) return false;
1047     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1048     if (!CE) return false;
1049     int64_t Value = CE->getValue();
1050     return Value >= N && Value <= M;
1051   }
1052 
1053   template<int64_t N, int64_t M>
1054   bool isImmediateS4() const {
1055     if (!isImm()) return false;
1056     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1057     if (!CE) return false;
1058     int64_t Value = CE->getValue();
1059     return ((Value & 3) == 0) && Value >= N && Value <= M;
1060   }
1061 
1062   bool isFBits16() const {
1063     return isImmediate<0, 17>();
1064   }
1065   bool isFBits32() const {
1066     return isImmediate<1, 33>();
1067   }
1068   bool isImm8s4() const {
1069     return isImmediateS4<-1020, 1020>();
1070   }
1071   bool isImm7s4() const {
1072     return isImmediateS4<-508, 508>();
1073   }
1074   bool isImm0_1020s4() const {
1075     return isImmediateS4<0, 1020>();
1076   }
1077   bool isImm0_508s4() const {
1078     return isImmediateS4<0, 508>();
1079   }
1080   bool isImm0_508s4Neg() const {
1081     if (!isImm()) return false;
1082     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1083     if (!CE) return false;
1084     int64_t Value = -CE->getValue();
    // Explicitly exclude zero. We want that to use the normal 0_508 version.
1086     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1087   }
1088 
1089   bool isImm0_4095Neg() const {
1090     if (!isImm()) return false;
1091     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1092     if (!CE) return false;
    // isImm0_4095Neg is used with 32-bit immediates only.
    // 32-bit immediates are zero-extended to 64 bits when parsed,
    // thus a simple -CE->getValue() results in a big negative number,
    // not a small positive number as intended.
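    // E.g. an operand parsed as 0xfffffffd is stored as that positive 64-bit
    // value; it passes the check below, and negating it in 32-bit arithmetic
    // yields 3, which is in range.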
1097     if ((CE->getValue() >> 32) > 0) return false;
1098     uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1099     return Value > 0 && Value < 4096;
1100   }
1101 
1102   bool isImm0_7() const {
1103     return isImmediate<0, 7>();
1104   }
1105 
1106   bool isImm1_16() const {
1107     return isImmediate<1, 16>();
1108   }
1109 
1110   bool isImm1_32() const {
1111     return isImmediate<1, 32>();
1112   }
1113 
1114   bool isImm8_255() const {
1115     return isImmediate<8, 255>();
1116   }
1117 
1118   bool isImm256_65535Expr() const {
1119     if (!isImm()) return false;
1120     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1121     // If it's not a constant expression, it'll generate a fixup and be
1122     // handled later.
1123     if (!CE) return true;
1124     int64_t Value = CE->getValue();
1125     return Value >= 256 && Value < 65536;
1126   }
1127 
1128   bool isImm0_65535Expr() const {
1129     if (!isImm()) return false;
1130     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1131     // If it's not a constant expression, it'll generate a fixup and be
1132     // handled later.
1133     if (!CE) return true;
1134     int64_t Value = CE->getValue();
1135     return Value >= 0 && Value < 65536;
1136   }
1137 
1138   bool isImm24bit() const {
1139     return isImmediate<0, 0xffffff + 1>();
1140   }
1141 
1142   bool isImmThumbSR() const {
1143     return isImmediate<1, 33>();
1144   }
1145 
1146   bool isPKHLSLImm() const {
1147     return isImmediate<0, 32>();
1148   }
1149 
1150   bool isPKHASRImm() const {
1151     return isImmediate<0, 33>();
1152   }
1153 
1154   bool isAdrLabel() const {
1155     // If we have an immediate that's not a constant, treat it as a label
1156     // reference needing a fixup.
1157     if (isImm() && !isa<MCConstantExpr>(getImm()))
1158       return true;
1159 
1160     // If it is a constant, it must fit into a modified immediate encoding.
1161     if (!isImm()) return false;
1162     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163     if (!CE) return false;
1164     int64_t Value = CE->getValue();
1165     return (ARM_AM::getSOImmVal(Value) != -1 ||
1166             ARM_AM::getSOImmVal(-Value) != -1);
1167   }
1168 
1169   bool isT2SOImm() const {
1170     // If we have an immediate that's not a constant, treat it as an expression
1171     // needing a fixup.
1172     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1173       // We want to avoid matching :upper16: and :lower16: as we want these
1174       // expressions to match in isImm0_65535Expr()
1175       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1176       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1177                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1178     }
1179     if (!isImm()) return false;
1180     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1181     if (!CE) return false;
1182     int64_t Value = CE->getValue();
1183     return ARM_AM::getT2SOImmVal(Value) != -1;
1184   }
1185 
1186   bool isT2SOImmNot() const {
1187     if (!isImm()) return false;
1188     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1189     if (!CE) return false;
1190     int64_t Value = CE->getValue();
1191     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1192       ARM_AM::getT2SOImmVal(~Value) != -1;
1193   }
1194 
1195   bool isT2SOImmNeg() const {
1196     if (!isImm()) return false;
1197     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1198     if (!CE) return false;
1199     int64_t Value = CE->getValue();
1200     // Only use this when not representable as a plain so_imm.
1201     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1202       ARM_AM::getT2SOImmVal(-Value) != -1;
1203   }
1204 
1205   bool isSetEndImm() const {
1206     if (!isImm()) return false;
1207     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1208     if (!CE) return false;
1209     int64_t Value = CE->getValue();
1210     return Value == 1 || Value == 0;
1211   }
1212 
1213   bool isReg() const override { return Kind == k_Register; }
1214   bool isRegList() const { return Kind == k_RegisterList; }
1215   bool isRegListWithAPSR() const {
1216     return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
1217   }
1218   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1219   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1220   bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
1221   bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
1222   bool isToken() const override { return Kind == k_Token; }
1223   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1224   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1225   bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1226   bool isMem() const override {
1227     if (Kind != k_Memory)
1228       return false;
1229     if (Memory.BaseRegNum &&
1230         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1231       return false;
1232     if (Memory.OffsetRegNum &&
1233         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1234       return false;
1235     return true;
1236   }
1237   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1238   bool isRegShiftedReg() const {
1239     return Kind == k_ShiftedRegister &&
1240            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1241                RegShiftedReg.SrcReg) &&
1242            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1243                RegShiftedReg.ShiftReg);
1244   }
1245   bool isRegShiftedImm() const {
1246     return Kind == k_ShiftedImmediate &&
1247            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1248                RegShiftedImm.SrcReg);
1249   }
1250   bool isRotImm() const { return Kind == k_RotateImmediate; }
1251   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1252 
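  // Illustration for the two predicates below: #0xffffff00 is not itself a
  // valid modified immediate, but its bitwise complement #0xff is, so
  // isModImmNot() accepts it; isModImmNeg() is the analogous check using
  // arithmetic negation and additionally requires that the original value is
  // not already encodable.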
1253   bool isModImmNot() const {
1254     if (!isImm()) return false;
1255     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1256     if (!CE) return false;
1257     int64_t Value = CE->getValue();
1258     return ARM_AM::getSOImmVal(~Value) != -1;
1259   }
1260 
1261   bool isModImmNeg() const {
1262     if (!isImm()) return false;
1263     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1264     if (!CE) return false;
1265     int64_t Value = CE->getValue();
1266     return ARM_AM::getSOImmVal(Value) == -1 &&
1267       ARM_AM::getSOImmVal(-Value) != -1;
1268   }
1269 
1270   bool isThumbModImmNeg1_7() const {
1271     if (!isImm()) return false;
1272     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1273     if (!CE) return false;
1274     int32_t Value = -(int32_t)CE->getValue();
1275     return 0 < Value && Value < 8;
1276   }
1277 
1278   bool isThumbModImmNeg8_255() const {
1279     if (!isImm()) return false;
1280     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1281     if (!CE) return false;
1282     int32_t Value = -(int32_t)CE->getValue();
1283     return 7 < Value && Value < 256;
1284   }
1285 
1286   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1287   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1288   bool isPostIdxRegShifted() const {
1289     return Kind == k_PostIndexRegister &&
1290            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1291   }
1292   bool isPostIdxReg() const {
1293     return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1294   }
1295   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1296     if (!isMem())
1297       return false;
1298     // No offset of any kind.
1299     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1300      (alignOK || Memory.Alignment == Alignment);
1301   }
1302   bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
1303     if (!isMem())
1304       return false;
1305 
1306     if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1307             Memory.BaseRegNum))
1308       return false;
1309 
1310     // No offset of any kind.
1311     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1312      (alignOK || Memory.Alignment == Alignment);
1313   }
1314   bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
1315     if (!isMem())
1316       return false;
1317 
1318     if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
1319             Memory.BaseRegNum))
1320       return false;
1321 
1322     // No offset of any kind.
1323     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1324      (alignOK || Memory.Alignment == Alignment);
1325   }
1326   bool isMemPCRelImm12() const {
1327     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1328       return false;
1329     // Base register must be PC.
1330     if (Memory.BaseRegNum != ARM::PC)
1331       return false;
1332     // Immediate offset in range [-4095, 4095].
1333     if (!Memory.OffsetImm) return true;
1334     int64_t Val = Memory.OffsetImm->getValue();
1335     return (Val > -4096 && Val < 4096) ||
1336            (Val == std::numeric_limits<int32_t>::min());
1337   }
1338 
1339   bool isAlignedMemory() const {
1340     return isMemNoOffset(true);
1341   }
1342 
1343   bool isAlignedMemoryNone() const {
1344     return isMemNoOffset(false, 0);
1345   }
1346 
1347   bool isDupAlignedMemoryNone() const {
1348     return isMemNoOffset(false, 0);
1349   }
1350 
1351   bool isAlignedMemory16() const {
1352     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1353       return true;
1354     return isMemNoOffset(false, 0);
1355   }
1356 
1357   bool isDupAlignedMemory16() const {
1358     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1359       return true;
1360     return isMemNoOffset(false, 0);
1361   }
1362 
1363   bool isAlignedMemory32() const {
1364     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1365       return true;
1366     return isMemNoOffset(false, 0);
1367   }
1368 
1369   bool isDupAlignedMemory32() const {
1370     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1371       return true;
1372     return isMemNoOffset(false, 0);
1373   }
1374 
1375   bool isAlignedMemory64() const {
1376     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1377       return true;
1378     return isMemNoOffset(false, 0);
1379   }
1380 
1381   bool isDupAlignedMemory64() const {
1382     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1383       return true;
1384     return isMemNoOffset(false, 0);
1385   }
1386 
1387   bool isAlignedMemory64or128() const {
1388     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1389       return true;
1390     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1391       return true;
1392     return isMemNoOffset(false, 0);
1393   }
1394 
1395   bool isDupAlignedMemory64or128() const {
1396     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1397       return true;
1398     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1399       return true;
1400     return isMemNoOffset(false, 0);
1401   }
1402 
1403   bool isAlignedMemory64or128or256() const {
1404     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1405       return true;
1406     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1407       return true;
1408     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1409       return true;
1410     return isMemNoOffset(false, 0);
1411   }
1412 
1413   bool isAddrMode2() const {
1414     if (!isMem() || Memory.Alignment != 0) return false;
1415     // Check for register offset.
1416     if (Memory.OffsetRegNum) return true;
1417     // Immediate offset in range [-4095, 4095].
1418     if (!Memory.OffsetImm) return true;
1419     int64_t Val = Memory.OffsetImm->getValue();
1420     return Val > -4096 && Val < 4096;
1421   }
1422 
1423   bool isAM2OffsetImm() const {
1424     if (!isImm()) return false;
1425     // Immediate offset in range [-4095, 4095].
1426     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1427     if (!CE) return false;
1428     int64_t Val = CE->getValue();
1429     return (Val == std::numeric_limits<int32_t>::min()) ||
1430            (Val > -4096 && Val < 4096);
1431   }
1432 
1433   bool isAddrMode3() const {
1434     // If we have an immediate that's not a constant, treat it as a label
1435     // reference needing a fixup. If it is a constant, it's something else
1436     // and we reject it.
1437     if (isImm() && !isa<MCConstantExpr>(getImm()))
1438       return true;
1439     if (!isMem() || Memory.Alignment != 0) return false;
1440     // No shifts are legal for AM3.
1441     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1442     // Check for register offset.
1443     if (Memory.OffsetRegNum) return true;
1444     // Immediate offset in range [-255, 255].
1445     if (!Memory.OffsetImm) return true;
1446     int64_t Val = Memory.OffsetImm->getValue();
1447     // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
1448     // have to check for this too.
1449     return (Val > -256 && Val < 256) ||
1450            Val == std::numeric_limits<int32_t>::min();
1451   }
1452 
1453   bool isAM3Offset() const {
1454     if (isPostIdxReg())
1455       return true;
1456     if (!isImm())
1457       return false;
1458     // Immediate offset in range [-255, 255].
1459     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1460     if (!CE) return false;
1461     int64_t Val = CE->getValue();
1462     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1463     return (Val > -256 && Val < 256) ||
1464            Val == std::numeric_limits<int32_t>::min();
1465   }
1466 
1467   bool isAddrMode5() const {
1468     // If we have an immediate that's not a constant, treat it as a label
1469     // reference needing a fixup. If it is a constant, it's something else
1470     // and we reject it.
1471     if (isImm() && !isa<MCConstantExpr>(getImm()))
1472       return true;
1473     if (!isMem() || Memory.Alignment != 0) return false;
1474     // Check for register offset.
1475     if (Memory.OffsetRegNum) return false;
1476     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1477     if (!Memory.OffsetImm) return true;
1478     int64_t Val = Memory.OffsetImm->getValue();
1479     return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1480       Val == std::numeric_limits<int32_t>::min();
1481   }
1482 
1483   bool isAddrMode5FP16() const {
1484     // If we have an immediate that's not a constant, treat it as a label
1485     // reference needing a fixup. If it is a constant, it's something else
1486     // and we reject it.
1487     if (isImm() && !isa<MCConstantExpr>(getImm()))
1488       return true;
1489     if (!isMem() || Memory.Alignment != 0) return false;
1490     // Check for register offset.
1491     if (Memory.OffsetRegNum) return false;
1492     // Immediate offset in range [-510, 510] and a multiple of 2.
1493     if (!Memory.OffsetImm) return true;
1494     int64_t Val = Memory.OffsetImm->getValue();
1495     return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1496            Val == std::numeric_limits<int32_t>::min();
1497   }
1498 
1499   bool isMemTBB() const {
1500     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1501         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1502       return false;
1503     return true;
1504   }
1505 
1506   bool isMemTBH() const {
1507     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1508         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1509         Memory.Alignment != 0 )
1510       return false;
1511     return true;
1512   }
1513 
1514   bool isMemRegOffset() const {
1515     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1516       return false;
1517     return true;
1518   }
1519 
1520   bool isT2MemRegOffset() const {
1521     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1522         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1523       return false;
1524     // Only lsl #{0, 1, 2, 3} allowed.
1525     if (Memory.ShiftType == ARM_AM::no_shift)
1526       return true;
1527     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1528       return false;
1529     return true;
1530   }
1531 
1532   bool isMemThumbRR() const {
1533     // Thumb reg+reg addressing is simple. Just two registers, a base and
1534     // an offset. No shifts, negations or any other complicating factors.
1535     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1536         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1537       return false;
1538     return isARMLowRegister(Memory.BaseRegNum) &&
1539            isARMLowRegister(Memory.OffsetRegNum);
1540   }
1541 
1542   bool isMemThumbRIs4() const {
1543     if (!isMem() || Memory.OffsetRegNum != 0 ||
1544         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1545       return false;
1546     // Immediate offset, multiple of 4 in range [0, 124].
1547     if (!Memory.OffsetImm) return true;
1548     int64_t Val = Memory.OffsetImm->getValue();
1549     return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1550   }
1551 
1552   bool isMemThumbRIs2() const {
1553     if (!isMem() || Memory.OffsetRegNum != 0 ||
1554         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1555       return false;
1556     // Immediate offset, multiple of 2 in range [0, 62].
1557     if (!Memory.OffsetImm) return true;
1558     int64_t Val = Memory.OffsetImm->getValue();
1559     return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1560   }
1561 
1562   bool isMemThumbRIs1() const {
1563     if (!isMem() || Memory.OffsetRegNum != 0 ||
1564         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1565       return false;
1566     // Immediate offset in range [0, 31].
1567     if (!Memory.OffsetImm) return true;
1568     int64_t Val = Memory.OffsetImm->getValue();
1569     return Val >= 0 && Val <= 31;
1570   }
1571 
1572   bool isMemThumbSPI() const {
1573     if (!isMem() || Memory.OffsetRegNum != 0 ||
1574         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1575       return false;
1576     // Immediate offset, multiple of 4 in range [0, 1020].
1577     if (!Memory.OffsetImm) return true;
1578     int64_t Val = Memory.OffsetImm->getValue();
1579     return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1580   }
1581 
1582   bool isMemImm8s4Offset() const {
1583     // If we have an immediate that's not a constant, treat it as a label
1584     // reference needing a fixup. If it is a constant, it's something else
1585     // and we reject it.
1586     if (isImm() && !isa<MCConstantExpr>(getImm()))
1587       return true;
1588     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1589       return false;
1590     // Immediate offset a multiple of 4 in range [-1020, 1020].
1591     if (!Memory.OffsetImm) return true;
1592     int64_t Val = Memory.OffsetImm->getValue();
1593     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1594     return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1595            Val == std::numeric_limits<int32_t>::min();
1596   }
1597   bool isMemImm7s4Offset() const {
1598     // If we have an immediate that's not a constant, treat it as a label
1599     // reference needing a fixup. If it is a constant, it's something else
1600     // and we reject it.
1601     if (isImm() && !isa<MCConstantExpr>(getImm()))
1602       return true;
1603     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
1604         !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
1605             Memory.BaseRegNum))
1606       return false;
1607     // Immediate offset a multiple of 4 in range [-508, 508].
1608     if (!Memory.OffsetImm) return true;
1609     int64_t Val = Memory.OffsetImm->getValue();
1610     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1611     return (Val >= -508 && Val <= 508 && (Val & 3) == 0) ||
                Val == std::numeric_limits<int32_t>::min();
1612   }
1613   bool isMemImm0_1020s4Offset() const {
1614     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1615       return false;
1616     // Immediate offset a multiple of 4 in range [0, 1020].
1617     if (!Memory.OffsetImm) return true;
1618     int64_t Val = Memory.OffsetImm->getValue();
1619     return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1620   }
1621 
1622   bool isMemImm8Offset() const {
1623     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1624       return false;
1625     // Base reg of PC isn't allowed for these encodings.
1626     if (Memory.BaseRegNum == ARM::PC) return false;
1627     // Immediate offset in range [-255, 255].
1628     if (!Memory.OffsetImm) return true;
1629     int64_t Val = Memory.OffsetImm->getValue();
1630     return (Val == std::numeric_limits<int32_t>::min()) ||
1631            (Val > -256 && Val < 256);
1632   }
1633 
1634   bool isMemPosImm8Offset() const {
1635     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1636       return false;
1637     // Immediate offset in range [0, 255].
1638     if (!Memory.OffsetImm) return true;
1639     int64_t Val = Memory.OffsetImm->getValue();
1640     return Val >= 0 && Val < 256;
1641   }
1642 
1643   bool isMemNegImm8Offset() const {
1644     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1645       return false;
1646     // Base reg of PC isn't allowed for these encodings.
1647     if (Memory.BaseRegNum == ARM::PC) return false;
1648     // Immediate offset in range [-255, -1].
1649     if (!Memory.OffsetImm) return false;
1650     int64_t Val = Memory.OffsetImm->getValue();
1651     return (Val == std::numeric_limits<int32_t>::min()) ||
1652            (Val > -256 && Val < 0);
1653   }
1654 
1655   bool isMemUImm12Offset() const {
1656     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1657       return false;
1658     // Immediate offset in range [0, 4095].
1659     if (!Memory.OffsetImm) return true;
1660     int64_t Val = Memory.OffsetImm->getValue();
1661     return (Val >= 0 && Val < 4096);
1662   }
1663 
1664   bool isMemImm12Offset() const {
1665     // If we have an immediate that's not a constant, treat it as a label
1666     // reference needing a fixup. If it is a constant, it's something else
1667     // and we reject it.
1668 
1669     if (isImm() && !isa<MCConstantExpr>(getImm()))
1670       return true;
1671 
1672     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1673       return false;
1674     // Immediate offset in range [-4095, 4095].
1675     if (!Memory.OffsetImm) return true;
1676     int64_t Val = Memory.OffsetImm->getValue();
1677     return (Val > -4096 && Val < 4096) ||
1678            (Val == std::numeric_limits<int32_t>::min());
1679   }
1680 
1681   bool isConstPoolAsmImm() const {
1682     // Delay processing of the constant-pool immediate; it will be turned
1683     // into a constant later. Match no other operand.
1684     return isConstantPoolImm();
1685   }
1686 
1687   bool isPostIdxImm8() const {
1688     if (!isImm()) return false;
1689     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1690     if (!CE) return false;
1691     int64_t Val = CE->getValue();
1692     return (Val > -256 && Val < 256) ||
1693            (Val == std::numeric_limits<int32_t>::min());
1694   }
1695 
1696   bool isPostIdxImm8s4() const {
1697     if (!isImm()) return false;
1698     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1699     if (!CE) return false;
1700     int64_t Val = CE->getValue();
1701     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1702            (Val == std::numeric_limits<int32_t>::min());
1703   }
1704 
1705   bool isMSRMask() const { return Kind == k_MSRMask; }
1706   bool isBankedReg() const { return Kind == k_BankedReg; }
1707   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1708 
1709   // NEON operands.
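       // A "single spaced" register list uses consecutive D registers, e.g.
       // {d0, d1, d2}; a "double spaced" list skips every other one, e.g.
       // {d0, d2, d4}.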
1710   bool isSingleSpacedVectorList() const {
1711     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1712   }
1713 
1714   bool isDoubleSpacedVectorList() const {
1715     return Kind == k_VectorList && VectorList.isDoubleSpaced;
1716   }
1717 
1718   bool isVecListOneD() const {
1719     if (!isSingleSpacedVectorList()) return false;
1720     return VectorList.Count == 1;
1721   }
1722 
1723   bool isVecListDPair() const {
1724     if (!isSingleSpacedVectorList()) return false;
1725     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1726               .contains(VectorList.RegNum));
1727   }
1728 
1729   bool isVecListThreeD() const {
1730     if (!isSingleSpacedVectorList()) return false;
1731     return VectorList.Count == 3;
1732   }
1733 
1734   bool isVecListFourD() const {
1735     if (!isSingleSpacedVectorList()) return false;
1736     return VectorList.Count == 4;
1737   }
1738 
1739   bool isVecListDPairSpaced() const {
1740     if (Kind != k_VectorList) return false;
1741     if (isSingleSpacedVectorList()) return false;
1742     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1743               .contains(VectorList.RegNum));
1744   }
1745 
1746   bool isVecListThreeQ() const {
1747     if (!isDoubleSpacedVectorList()) return false;
1748     return VectorList.Count == 3;
1749   }
1750 
1751   bool isVecListFourQ() const {
1752     if (!isDoubleSpacedVectorList()) return false;
1753     return VectorList.Count == 4;
1754   }
1755 
1756   bool isSingleSpacedVectorAllLanes() const {
1757     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1758   }
1759 
1760   bool isDoubleSpacedVectorAllLanes() const {
1761     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1762   }
1763 
1764   bool isVecListOneDAllLanes() const {
1765     if (!isSingleSpacedVectorAllLanes()) return false;
1766     return VectorList.Count == 1;
1767   }
1768 
1769   bool isVecListDPairAllLanes() const {
1770     if (!isSingleSpacedVectorAllLanes()) return false;
1771     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1772               .contains(VectorList.RegNum));
1773   }
1774 
1775   bool isVecListDPairSpacedAllLanes() const {
1776     if (!isDoubleSpacedVectorAllLanes()) return false;
1777     return VectorList.Count == 2;
1778   }
1779 
1780   bool isVecListThreeDAllLanes() const {
1781     if (!isSingleSpacedVectorAllLanes()) return false;
1782     return VectorList.Count == 3;
1783   }
1784 
1785   bool isVecListThreeQAllLanes() const {
1786     if (!isDoubleSpacedVectorAllLanes()) return false;
1787     return VectorList.Count == 3;
1788   }
1789 
1790   bool isVecListFourDAllLanes() const {
1791     if (!isSingleSpacedVectorAllLanes()) return false;
1792     return VectorList.Count == 4;
1793   }
1794 
1795   bool isVecListFourQAllLanes() const {
1796     if (!isDoubleSpacedVectorAllLanes()) return false;
1797     return VectorList.Count == 4;
1798   }
1799 
1800   bool isSingleSpacedVectorIndexed() const {
1801     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1802   }
1803 
1804   bool isDoubleSpacedVectorIndexed() const {
1805     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1806   }
1807 
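       // The lane index limits below reflect the number of lanes in a 64-bit
       // D register: 8 bytes, 4 halfwords, or 2 words.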
1808   bool isVecListOneDByteIndexed() const {
1809     if (!isSingleSpacedVectorIndexed()) return false;
1810     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1811   }
1812 
1813   bool isVecListOneDHWordIndexed() const {
1814     if (!isSingleSpacedVectorIndexed()) return false;
1815     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1816   }
1817 
1818   bool isVecListOneDWordIndexed() const {
1819     if (!isSingleSpacedVectorIndexed()) return false;
1820     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1821   }
1822 
1823   bool isVecListTwoDByteIndexed() const {
1824     if (!isSingleSpacedVectorIndexed()) return false;
1825     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1826   }
1827 
1828   bool isVecListTwoDHWordIndexed() const {
1829     if (!isSingleSpacedVectorIndexed()) return false;
1830     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1831   }
1832 
1833   bool isVecListTwoQWordIndexed() const {
1834     if (!isDoubleSpacedVectorIndexed()) return false;
1835     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1836   }
1837 
1838   bool isVecListTwoQHWordIndexed() const {
1839     if (!isDoubleSpacedVectorIndexed()) return false;
1840     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1841   }
1842 
1843   bool isVecListTwoDWordIndexed() const {
1844     if (!isSingleSpacedVectorIndexed()) return false;
1845     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1846   }
1847 
1848   bool isVecListThreeDByteIndexed() const {
1849     if (!isSingleSpacedVectorIndexed()) return false;
1850     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1851   }
1852 
1853   bool isVecListThreeDHWordIndexed() const {
1854     if (!isSingleSpacedVectorIndexed()) return false;
1855     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1856   }
1857 
1858   bool isVecListThreeQWordIndexed() const {
1859     if (!isDoubleSpacedVectorIndexed()) return false;
1860     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1861   }
1862 
1863   bool isVecListThreeQHWordIndexed() const {
1864     if (!isDoubleSpacedVectorIndexed()) return false;
1865     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1866   }
1867 
1868   bool isVecListThreeDWordIndexed() const {
1869     if (!isSingleSpacedVectorIndexed()) return false;
1870     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1871   }
1872 
1873   bool isVecListFourDByteIndexed() const {
1874     if (!isSingleSpacedVectorIndexed()) return false;
1875     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1876   }
1877 
1878   bool isVecListFourDHWordIndexed() const {
1879     if (!isSingleSpacedVectorIndexed()) return false;
1880     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1881   }
1882 
1883   bool isVecListFourQWordIndexed() const {
1884     if (!isDoubleSpacedVectorIndexed()) return false;
1885     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1886   }
1887 
1888   bool isVecListFourQHWordIndexed() const {
1889     if (!isDoubleSpacedVectorIndexed()) return false;
1890     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1891   }
1892 
1893   bool isVecListFourDWordIndexed() const {
1894     if (!isSingleSpacedVectorIndexed()) return false;
1895     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1896   }
1897 
1898   bool isVectorIndex() const { return Kind == k_VectorIndex; }
1899 
1900   bool isVectorIndex8() const {
1901     if (Kind != k_VectorIndex) return false;
1902     return VectorIndex.Val < 8;
1903   }
1904 
1905   bool isVectorIndex16() const {
1906     if (Kind != k_VectorIndex) return false;
1907     return VectorIndex.Val < 4;
1908   }
1909 
1910   bool isVectorIndex32() const {
1911     if (Kind != k_VectorIndex) return false;
1912     return VectorIndex.Val < 2;
1913   }
1914   bool isVectorIndex64() const {
1915     if (Kind != k_VectorIndex) return false;
1916     return VectorIndex.Val < 1;
1917   }
1918 
1919   bool isNEONi8splat() const {
1920     if (!isImm()) return false;
1921     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1922     // Must be a constant.
1923     if (!CE) return false;
1924     int64_t Value = CE->getValue();
1925     // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1926     // value.
1927     return Value >= 0 && Value < 256;
1928   }
1929 
1930   bool isNEONi16splat() const {
1931     if (isNEONByteReplicate(2))
1932       return false; // Leave this to byte replication and forbid it here.
1933     if (!isImm())
1934       return false;
1935     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1936     // Must be a constant.
1937     if (!CE) return false;
1938     unsigned Value = CE->getValue();
1939     return ARM_AM::isNEONi16splat(Value);
1940   }
1941 
1942   bool isNEONi16splatNot() const {
1943     if (!isImm())
1944       return false;
1945     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1946     // Must be a constant.
1947     if (!CE) return false;
1948     unsigned Value = CE->getValue();
1949     return ARM_AM::isNEONi16splat(~Value & 0xffff);
1950   }
1951 
1952   bool isNEONi32splat() const {
1953     if (isNEONByteReplicate(4))
1954       return false; // Leave this to byte replication and forbid it here.
1955     if (!isImm())
1956       return false;
1957     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1958     // Must be a constant.
1959     if (!CE) return false;
1960     unsigned Value = CE->getValue();
1961     return ARM_AM::isNEONi32splat(Value);
1962   }
1963 
1964   bool isNEONi32splatNot() const {
1965     if (!isImm())
1966       return false;
1967     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1968     // Must be a constant.
1969     if (!CE) return false;
1970     unsigned Value = CE->getValue();
1971     return ARM_AM::isNEONi32splat(~Value);
1972   }
1973 
1974   static bool isValidNEONi32vmovImm(int64_t Value) {
1975     // i32 value with set bits in only one byte: X000, 0X00, 00X0, or 000X.
1976     // For VMOV/VMVN only, 00Xf and 0Xff are also accepted.
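         // Accepted values include, e.g., 0x000000ab, 0x0000ab00, 0x00ab0000,
         // 0xab000000, 0x0000abff and 0x00abffff.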
1977     return ((Value & 0xffffffffffffff00) == 0) ||
1978            ((Value & 0xffffffffffff00ff) == 0) ||
1979            ((Value & 0xffffffffff00ffff) == 0) ||
1980            ((Value & 0xffffffff00ffffff) == 0) ||
1981            ((Value & 0xffffffffffff00ff) == 0xff) ||
1982            ((Value & 0xffffffffff00ffff) == 0xffff);
1983   }
1984 
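       // Returns true if the immediate consists of NumElems identical Width-bit
       // elements (of the value or, if Inv is set, of its bitwise inverse),
       // e.g. 0x2a2a2a2a is four copies of the 8-bit element 0x2a.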
1985   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
1986     assert((Width == 8 || Width == 16 || Width == 32) &&
1987            "Invalid element width");
1988     assert(NumElems * Width <= 64 && "Invalid result width");
1989 
1990     if (!isImm())
1991       return false;
1992     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1993     // Must be a constant.
1994     if (!CE)
1995       return false;
1996     int64_t Value = CE->getValue();
1997     if (!Value)
1998       return false; // Don't bother with zero.
1999     if (Inv)
2000       Value = ~Value;
2001 
2002     uint64_t Mask = (1ull << Width) - 1;
2003     uint64_t Elem = Value & Mask;
2004     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
2005       return false;
2006     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
2007       return false;
2008 
2009     for (unsigned i = 1; i < NumElems; ++i) {
2010       Value >>= Width;
2011       if ((Value & Mask) != Elem)
2012         return false;
2013     }
2014     return true;
2015   }
2016 
2017   bool isNEONByteReplicate(unsigned NumBytes) const {
2018     return isNEONReplicate(8, NumBytes, false);
2019   }
2020 
2021   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
2022     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
2023            "Invalid source width");
2024     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
2025            "Invalid destination width");
2026     assert(FromW < ToW && "ToW must be greater than FromW");
2027   }
2028 
2029   template<unsigned FromW, unsigned ToW>
2030   bool isNEONmovReplicate() const {
2031     checkNeonReplicateArgs(FromW, ToW);
2032     if (ToW == 64 && isNEONi64splat())
2033       return false;
2034     return isNEONReplicate(FromW, ToW / FromW, false);
2035   }
2036 
2037   template<unsigned FromW, unsigned ToW>
2038   bool isNEONinvReplicate() const {
2039     checkNeonReplicateArgs(FromW, ToW);
2040     return isNEONReplicate(FromW, ToW / FromW, true);
2041   }
2042 
2043   bool isNEONi32vmov() const {
2044     if (isNEONByteReplicate(4))
2045       return false; // Let it be classified as a byte-replicate case.
2046     if (!isImm())
2047       return false;
2048     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2049     // Must be a constant.
2050     if (!CE)
2051       return false;
2052     return isValidNEONi32vmovImm(CE->getValue());
2053   }
2054 
2055   bool isNEONi32vmovNeg() const {
2056     if (!isImm()) return false;
2057     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2058     // Must be a constant.
2059     if (!CE) return false;
2060     return isValidNEONi32vmovImm(~CE->getValue());
2061   }
2062 
2063   bool isNEONi64splat() const {
2064     if (!isImm()) return false;
2065     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2066     // Must be a constant.
2067     if (!CE) return false;
2068     uint64_t Value = CE->getValue();
2069     // i64 value with each byte being either 0 or 0xff.
2070     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
2071       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
2072     return true;
2073   }
2074 
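       // True when the immediate is a rotation that is a multiple of Angle
       // degrees plus Remainder, no larger than 270; e.g. <90, 0> accepts
       // 0, 90, 180 and 270, while <180, 90> accepts 90 and 270.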
2075   template<int64_t Angle, int64_t Remainder>
2076   bool isComplexRotation() const {
2077     if (!isImm()) return false;
2078 
2079     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2080     if (!CE) return false;
2081     uint64_t Value = CE->getValue();
2082 
2083     return (Value % Angle == Remainder && Value <= 270);
2084   }
2085 
2086   bool isMVELongShift() const {
2087     if (!isImm()) return false;
2088     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2089     // Must be a constant.
2090     if (!CE) return false;
2091     uint64_t Value = CE->getValue();
2092     return Value >= 1 && Value <= 32;
2093   }
2094 
2095   bool isITCondCodeNoAL() const {
2096     if (!isITCondCode()) return false;
2097     ARMCC::CondCodes CC = getCondCode();
2098     return CC != ARMCC::AL;
2099   }
2100 
2101   bool isITCondCodeRestrictedI() const {
2102     if (!isITCondCode())
2103       return false;
2104     ARMCC::CondCodes CC = getCondCode();
2105     return CC == ARMCC::EQ || CC == ARMCC::NE;
2106   }
2107 
2108   bool isITCondCodeRestrictedS() const {
2109     if (!isITCondCode())
2110       return false;
2111     ARMCC::CondCodes CC = getCondCode();
2112     return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
2113            CC == ARMCC::GE;
2114   }
2115 
2116   bool isITCondCodeRestrictedU() const {
2117     if (!isITCondCode())
2118       return false;
2119     ARMCC::CondCodes CC = getCondCode();
2120     return CC == ARMCC::HS || CC == ARMCC::HI;
2121   }
2122 
2123   bool isITCondCodeRestrictedFP() const {
2124     if (!isITCondCode())
2125       return false;
2126     ARMCC::CondCodes CC = getCondCode();
2127     return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
2128            CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
2129   }
2130 
2131   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
2132     // Add as immediates when possible.  Null MCExpr = 0.
2133     if (!Expr)
2134       Inst.addOperand(MCOperand::createImm(0));
2135     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2136       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2137     else
2138       Inst.addOperand(MCOperand::createExpr(Expr));
2139   }
2140 
2141   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2142     assert(N == 1 && "Invalid number of operands!");
2143     addExpr(Inst, getImm());
2144   }
2145 
2146   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2147     assert(N == 1 && "Invalid number of operands!");
2148     addExpr(Inst, getImm());
2149   }
2150 
2151   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2152     assert(N == 2 && "Invalid number of operands!");
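         // A predicate is two operands: the condition code and a register,
         // which is CPSR for a real condition and no register (0) for AL.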
2153     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2154     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
2155     Inst.addOperand(MCOperand::createReg(RegNum));
2156   }
2157 
2158   void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
2159     assert(N == 2 && "Invalid number of operands!");
2160     Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
2161     unsigned RegNum = getVPTPred() == ARMVCC::None ? 0: ARM::P0;
2162     Inst.addOperand(MCOperand::createReg(RegNum));
2163   }
2164 
2165   void addVPTPredROperands(MCInst &Inst, unsigned N) const {
2166     assert(N == 3 && "Invalid number of operands!");
2167     addVPTPredNOperands(Inst, N-1);
2168     unsigned RegNum;
2169     if (getVPTPred() == ARMVCC::None) {
2170       RegNum = 0;
2171     } else {
2172       unsigned NextOpIndex = Inst.getNumOperands();
2173       const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
2174       int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
2175       assert(TiedOp >= 0 &&
2176              "Inactive register in vpred_r is not tied to an output!");
2177       RegNum = Inst.getOperand(TiedOp).getReg();
2178     }
2179     Inst.addOperand(MCOperand::createReg(RegNum));
2180   }
2181 
2182   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2183     assert(N == 1 && "Invalid number of operands!");
2184     Inst.addOperand(MCOperand::createImm(getCoproc()));
2185   }
2186 
2187   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2188     assert(N == 1 && "Invalid number of operands!");
2189     Inst.addOperand(MCOperand::createImm(getCoproc()));
2190   }
2191 
2192   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2193     assert(N == 1 && "Invalid number of operands!");
2194     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2195   }
2196 
2197   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2198     assert(N == 1 && "Invalid number of operands!");
2199     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2200   }
2201 
2202   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2203     assert(N == 1 && "Invalid number of operands!");
2204     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2205   }
2206 
2207   void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
2208     assert(N == 1 && "Invalid number of operands!");
2209     Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
2210   }
2211 
2212   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2213     assert(N == 1 && "Invalid number of operands!");
2214     Inst.addOperand(MCOperand::createReg(getReg()));
2215   }
2216 
2217   void addRegOperands(MCInst &Inst, unsigned N) const {
2218     assert(N == 1 && "Invalid number of operands!");
2219     Inst.addOperand(MCOperand::createReg(getReg()));
2220   }
2221 
2222   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2223     assert(N == 3 && "Invalid number of operands!");
2224     assert(isRegShiftedReg() &&
2225            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2226     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2227     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2228     Inst.addOperand(MCOperand::createImm(
2229       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2230   }
2231 
2232   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2233     assert(N == 2 && "Invalid number of operands!");
2234     assert(isRegShiftedImm() &&
2235            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2236     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2237     // Shift of #32 is encoded as 0 where permitted.
2238     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2239     Inst.addOperand(MCOperand::createImm(
2240       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2241   }
2242 
2243   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2244     assert(N == 1 && "Invalid number of operands!");
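         // Bit 5 of the encoded value is the isASR flag; the low bits hold the
         // shift amount.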
2245     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2246                                          ShifterImm.Imm));
2247   }
2248 
2249   void addRegListOperands(MCInst &Inst, unsigned N) const {
2250     assert(N == 1 && "Invalid number of operands!");
2251     const SmallVectorImpl<unsigned> &RegList = getRegList();
2252     for (SmallVectorImpl<unsigned>::const_iterator
2253            I = RegList.begin(), E = RegList.end(); I != E; ++I)
2254       Inst.addOperand(MCOperand::createReg(*I));
2255   }
2256 
2257   void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
2258     assert(N == 1 && "Invalid number of operands!");
2259     const SmallVectorImpl<unsigned> &RegList = getRegList();
2260     for (SmallVectorImpl<unsigned>::const_iterator
2261            I = RegList.begin(), E = RegList.end(); I != E; ++I)
2262       Inst.addOperand(MCOperand::createReg(*I));
2263   }
2264 
2265   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2266     addRegListOperands(Inst, N);
2267   }
2268 
2269   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2270     addRegListOperands(Inst, N);
2271   }
2272 
2273   void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2274     addRegListOperands(Inst, N);
2275   }
2276 
2277   void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
2278     addRegListOperands(Inst, N);
2279   }
2280 
2281   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2282     assert(N == 1 && "Invalid number of operands!");
2283     // Encoded as val>>3. The printer handles display as 8, 16, 24.
2284     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2285   }
2286 
2287   void addModImmOperands(MCInst &Inst, unsigned N) const {
2288     assert(N == 1 && "Invalid number of operands!");
2289 
2290     // Support for fixups (MCFixup)
2291     if (isImm())
2292       return addImmOperands(Inst, N);
2293 
2294     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2295   }
2296 
2297   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2298     assert(N == 1 && "Invalid number of operands!");
2299     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2300     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2301     Inst.addOperand(MCOperand::createImm(Enc));
2302   }
2303 
2304   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2305     assert(N == 1 && "Invalid number of operands!");
2306     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2307     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2308     Inst.addOperand(MCOperand::createImm(Enc));
2309   }
2310 
2311   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2312     assert(N == 1 && "Invalid number of operands!");
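         // The parsed immediate is negative; store its (positive) negation.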
2313     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2314     uint32_t Val = -CE->getValue();
2315     Inst.addOperand(MCOperand::createImm(Val));
2316   }
2317 
2318   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2319     assert(N == 1 && "Invalid number of operands!");
2320     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2321     uint32_t Val = -CE->getValue();
2322     Inst.addOperand(MCOperand::createImm(Val));
2323   }
2324 
2325   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2326     assert(N == 1 && "Invalid number of operands!");
2327     // Munge the lsb/width into a bitfield mask.
2328     unsigned lsb = Bitfield.LSB;
2329     unsigned width = Bitfield.Width;
2330     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
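         // For example, lsb = 8 and width = 4 produce the mask 0xfffff0ff
         // (bits [8, 11] clear).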
2331     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2332                       (32 - (lsb + width)));
2333     Inst.addOperand(MCOperand::createImm(Mask));
2334   }
2335 
2336   void addImmOperands(MCInst &Inst, unsigned N) const {
2337     assert(N == 1 && "Invalid number of operands!");
2338     addExpr(Inst, getImm());
2339   }
2340 
2341   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2342     assert(N == 1 && "Invalid number of operands!");
2343     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2344     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2345   }
2346 
2347   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2348     assert(N == 1 && "Invalid number of operands!");
2349     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2350     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2351   }
2352 
2353   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2354     assert(N == 1 && "Invalid number of operands!");
2355     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2356     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2357     Inst.addOperand(MCOperand::createImm(Val));
2358   }
2359 
2360   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2361     assert(N == 1 && "Invalid number of operands!");
2362     // FIXME: We really want to scale the value here, but the LDRD/STRD
2363     // instructions don't encode operands that way yet.
2364     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2365     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2366   }
2367 
2368   void addImm7s4Operands(MCInst &Inst, unsigned N) const {
2369     assert(N == 1 && "Invalid number of operands!");
2370     // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
2371     // instructions don't encode operands that way yet.
2372     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2373     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2374   }
2375 
2376   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2377     assert(N == 1 && "Invalid number of operands!");
2378     // The immediate is scaled by four in the encoding and is stored
2379     // in the MCInst as such. Lop off the low two bits here.
2380     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2381     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2382   }
2383 
2384   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2385     assert(N == 1 && "Invalid number of operands!");
2386     // The immediate is scaled by four in the encoding and is stored
2387     // in the MCInst as such. Lop off the low two bits here.
2388     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2389     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2390   }
2391 
2392   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2393     assert(N == 1 && "Invalid number of operands!");
2394     // The immediate is scaled by four in the encoding and is stored
2395     // in the MCInst as such. Lop off the low two bits here.
2396     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2397     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2398   }
2399 
2400   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2401     assert(N == 1 && "Invalid number of operands!");
2402     // The constant encodes as the immediate minus one, and we store the
2403     // encoded bits in the instruction, so subtract one here.
2404     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2405     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2406   }
2407 
2408   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2409     assert(N == 1 && "Invalid number of operands!");
2410     // The constant encodes as the immediate minus one, and we store the
2411     // encoded bits in the instruction, so subtract one here.
2412     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2413     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2414   }
2415 
2416   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2417     assert(N == 1 && "Invalid number of operands!");
2418     // The constant encodes as the immediate, except for 32, which encodes as
2419     // zero.
2420     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2421     unsigned Imm = CE->getValue();
2422     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2423   }
2424 
2425   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2426     assert(N == 1 && "Invalid number of operands!");
2427     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2428     // the instruction as well.
2429     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2430     int Val = CE->getValue();
2431     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2432   }
2433 
2434   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2435     assert(N == 1 && "Invalid number of operands!");
2436     // The operand is actually a t2_so_imm, but we have its bitwise
2437     // negation in the assembly source, so twiddle it here.
2438     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2439     Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2440   }
2441 
2442   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2443     assert(N == 1 && "Invalid number of operands!");
2444     // The operand is actually a t2_so_imm, but we have its
2445     // negation in the assembly source, so twiddle it here.
2446     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2447     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2448   }
2449 
2450   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2451     assert(N == 1 && "Invalid number of operands!");
2452     // The operand is actually an imm0_4095, but we have its
2453     // negation in the assembly source, so twiddle it here.
2454     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2455     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2456   }
2457 
2458   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
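         // Constant offsets are stored scaled down by 4 (>> 2); non-constant
         // expressions are kept as-is for a later fixup.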
2459     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2460       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2461       return;
2462     }
2463 
2464     const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2465     assert(SR && "Unknown value type!");
2466     Inst.addOperand(MCOperand::createExpr(SR));
2467   }
2468 
2469   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2470     assert(N == 1 && "Invalid number of operands!");
2471     if (isImm()) {
2472       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2473       if (CE) {
2474         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2475         return;
2476       }
2477 
2478       const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2479 
2480       assert(SR && "Unknown value type!");
2481       Inst.addOperand(MCOperand::createExpr(SR));
2482       return;
2483     }
2484 
2485     assert(isMem()  && "Unknown value type!");
2486     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2487     Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2488   }
2489 
2490   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2491     assert(N == 1 && "Invalid number of operands!");
2492     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2493   }
2494 
2495   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2496     assert(N == 1 && "Invalid number of operands!");
2497     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2498   }
2499 
2500   void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2501     assert(N == 1 && "Invalid number of operands!");
2502     Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2503   }
2504 
2505   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2506     assert(N == 1 && "Invalid number of operands!");
2507     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2508   }
2509 
2510   void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
2511     assert(N == 1 && "Invalid number of operands!");
2512     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2513   }
2514 
2515   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2516     assert(N == 1 && "Invalid number of operands!");
2517     int32_t Imm = Memory.OffsetImm->getValue();
2518     Inst.addOperand(MCOperand::createImm(Imm));
2519   }
2520 
2521   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2522     assert(N == 1 && "Invalid number of operands!");
2523     assert(isImm() && "Not an immediate!");
2524 
2525     // If we have an immediate that's not a constant, treat it as a label
2526     // reference needing a fixup.
2527     if (!isa<MCConstantExpr>(getImm())) {
2528       Inst.addOperand(MCOperand::createExpr(getImm()));
2529       return;
2530     }
2531 
2532     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2533     int Val = CE->getValue();
2534     Inst.addOperand(MCOperand::createImm(Val));
2535   }
2536 
2537   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2538     assert(N == 2 && "Invalid number of operands!");
2539     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2540     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2541   }
2542 
2543   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2544     addAlignedMemoryOperands(Inst, N);
2545   }
2546 
2547   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2548     addAlignedMemoryOperands(Inst, N);
2549   }
2550 
2551   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2552     addAlignedMemoryOperands(Inst, N);
2553   }
2554 
2555   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2556     addAlignedMemoryOperands(Inst, N);
2557   }
2558 
2559   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2560     addAlignedMemoryOperands(Inst, N);
2561   }
2562 
2563   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2564     addAlignedMemoryOperands(Inst, N);
2565   }
2566 
2567   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2568     addAlignedMemoryOperands(Inst, N);
2569   }
2570 
2571   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2572     addAlignedMemoryOperands(Inst, N);
2573   }
2574 
2575   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2576     addAlignedMemoryOperands(Inst, N);
2577   }
2578 
2579   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2580     addAlignedMemoryOperands(Inst, N);
2581   }
2582 
2583   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2584     addAlignedMemoryOperands(Inst, N);
2585   }
2586 
2587   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2588     assert(N == 3 && "Invalid number of operands!");
2589     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2590     if (!Memory.OffsetRegNum) {
2591       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2592       // Special case for #-0
2593       if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2594       if (Val < 0) Val = -Val;
2595       Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2596     } else {
2597       // For register offset, we encode the shift type and negation flag
2598       // here.
2599       Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2600                               Memory.ShiftImm, Memory.ShiftType);
2601     }
2602     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2603     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2604     Inst.addOperand(MCOperand::createImm(Val));
2605   }
2606 
2607   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2608     assert(N == 2 && "Invalid number of operands!");
2609     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2610     assert(CE && "non-constant AM2OffsetImm operand!");
2611     int32_t Val = CE->getValue();
2612     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2613     // Special case for #-0
2614     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2615     if (Val < 0) Val = -Val;
2616     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2617     Inst.addOperand(MCOperand::createReg(0));
2618     Inst.addOperand(MCOperand::createImm(Val));
2619   }
2620 
2621   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2622     assert(N == 3 && "Invalid number of operands!");
2623     // If we have an immediate that's not a constant, treat it as a label
2624     // reference needing a fixup. If it is a constant, it's something else
2625     // and we reject it.
2626     if (isImm()) {
2627       Inst.addOperand(MCOperand::createExpr(getImm()));
2628       Inst.addOperand(MCOperand::createReg(0));
2629       Inst.addOperand(MCOperand::createImm(0));
2630       return;
2631     }
2632 
2633     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2634     if (!Memory.OffsetRegNum) {
2635       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2636       // Special case for #-0
2637       if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2638       if (Val < 0) Val = -Val;
2639       Val = ARM_AM::getAM3Opc(AddSub, Val);
2640     } else {
2641       // For register offset we only encode the add/sub flag here; AM3 has
2642       // no shifted register form.
2643       Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2644     }
2645     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2646     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2647     Inst.addOperand(MCOperand::createImm(Val));
2648   }
2649 
2650   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2651     assert(N == 2 && "Invalid number of operands!");
2652     if (Kind == k_PostIndexRegister) {
2653       int32_t Val =
2654         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2655       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2656       Inst.addOperand(MCOperand::createImm(Val));
2657       return;
2658     }
2659 
2660     // Constant offset.
2661     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2662     int32_t Val = CE->getValue();
2663     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2664     // Special case for #-0
2665     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2666     if (Val < 0) Val = -Val;
2667     Val = ARM_AM::getAM3Opc(AddSub, Val);
2668     Inst.addOperand(MCOperand::createReg(0));
2669     Inst.addOperand(MCOperand::createImm(Val));
2670   }
2671 
2672   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2673     assert(N == 2 && "Invalid number of operands!");
2674     // If we have an immediate that's not a constant, treat it as a label
2675     // reference needing a fixup. If it is a constant, it's something else
2676     // and we reject it.
2677     if (isImm()) {
2678       Inst.addOperand(MCOperand::createExpr(getImm()));
2679       Inst.addOperand(MCOperand::createImm(0));
2680       return;
2681     }
2682 
2683     // The lower two bits are always zero and as such are not encoded.
2684     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2685     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2686     // Special case for #-0
2687     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2688     if (Val < 0) Val = -Val;
2689     Val = ARM_AM::getAM5Opc(AddSub, Val);
2690     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2691     Inst.addOperand(MCOperand::createImm(Val));
2692   }
2693 
2694   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2695     assert(N == 2 && "Invalid number of operands!");
2696     // If we have an immediate that's not a constant, treat it as a label
2697     // reference needing a fixup. If it is a constant, it's something else
2698     // and we reject it.
2699     if (isImm()) {
2700       Inst.addOperand(MCOperand::createExpr(getImm()));
2701       Inst.addOperand(MCOperand::createImm(0));
2702       return;
2703     }
2704 
2705     // The lower bit is always zero and as such is not encoded.
2706     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2707     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2708     // Special case for #-0
2709     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2710     if (Val < 0) Val = -Val;
2711     Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2712     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2713     Inst.addOperand(MCOperand::createImm(Val));
2714   }
2715 
2716   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2717     assert(N == 2 && "Invalid number of operands!");
2718     // If we have an immediate that's not a constant, treat it as a label
2719     // reference needing a fixup. If it is a constant, it's something else
2720     // and we reject it.
2721     if (isImm()) {
2722       Inst.addOperand(MCOperand::createExpr(getImm()));
2723       Inst.addOperand(MCOperand::createImm(0));
2724       return;
2725     }
2726 
2727     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2728     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2729     Inst.addOperand(MCOperand::createImm(Val));
2730   }
2731 
2732   void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
2733     assert(N == 2 && "Invalid number of operands!");
2734     // If we have an immediate that's not a constant, treat it as a label
2735     // reference needing a fixup. If it is a constant, it's something else
2736     // and we reject it.
2737     if (isImm()) {
2738       Inst.addOperand(MCOperand::createExpr(getImm()));
2739       Inst.addOperand(MCOperand::createImm(0));
2740       return;
2741     }
2742 
2743     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2744     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2745     Inst.addOperand(MCOperand::createImm(Val));
2746   }
2747 
2748   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2749     assert(N == 2 && "Invalid number of operands!");
2750     // The lower two bits are always zero and as such are not encoded.
2751     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2752     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2753     Inst.addOperand(MCOperand::createImm(Val));
2754   }
2755 
2756   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2757     assert(N == 2 && "Invalid number of operands!");
2758     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2759     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2760     Inst.addOperand(MCOperand::createImm(Val));
2761   }
2762 
2763   void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2764     addMemImm8OffsetOperands(Inst, N);
2765   }
2766 
2767   void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2768     addMemImm8OffsetOperands(Inst, N);
2769   }
2770 
2771   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2772     assert(N == 2 && "Invalid number of operands!");
2773     // If this is an immediate, it's a label reference.
2774     if (isImm()) {
2775       addExpr(Inst, getImm());
2776       Inst.addOperand(MCOperand::createImm(0));
2777       return;
2778     }
2779 
2780     // Otherwise, it's a normal memory reg+offset.
2781     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2782     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2783     Inst.addOperand(MCOperand::createImm(Val));
2784   }
2785 
2786   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2787     assert(N == 2 && "Invalid number of operands!");
2788     // If this is an immediate, it's a label reference.
2789     if (isImm()) {
2790       addExpr(Inst, getImm());
2791       Inst.addOperand(MCOperand::createImm(0));
2792       return;
2793     }
2794 
2795     // Otherwise, it's a normal memory reg+offset.
2796     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2797     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2798     Inst.addOperand(MCOperand::createImm(Val));
2799   }
2800 
2801   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2802     assert(N == 1 && "Invalid number of operands!");
2803     // This is the container for the immediate from which the constant pool
2804     // entry will be created.
2805     addExpr(Inst, getConstantPoolImm());
2807   }
2808 
2809   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2810     assert(N == 2 && "Invalid number of operands!");
2811     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2812     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2813   }
2814 
2815   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2816     assert(N == 2 && "Invalid number of operands!");
2817     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2818     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2819   }
2820 
2821   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2822     assert(N == 3 && "Invalid number of operands!");
2823     unsigned Val =
2824       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2825                         Memory.ShiftImm, Memory.ShiftType);
2826     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2827     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2828     Inst.addOperand(MCOperand::createImm(Val));
2829   }
2830 
2831   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2832     assert(N == 3 && "Invalid number of operands!");
2833     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2834     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2835     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2836   }
2837 
2838   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2839     assert(N == 2 && "Invalid number of operands!");
2840     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2841     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2842   }
2843 
2844   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2845     assert(N == 2 && "Invalid number of operands!");
2846     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2847     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2848     Inst.addOperand(MCOperand::createImm(Val));
2849   }
2850 
2851   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2852     assert(N == 2 && "Invalid number of operands!");
2853     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2854     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2855     Inst.addOperand(MCOperand::createImm(Val));
2856   }
2857 
2858   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2859     assert(N == 2 && "Invalid number of operands!");
2860     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2861     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2862     Inst.addOperand(MCOperand::createImm(Val));
2863   }
2864 
2865   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2866     assert(N == 2 && "Invalid number of operands!");
2867     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2868     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2869     Inst.addOperand(MCOperand::createImm(Val));
2870   }
2871 
2872   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2873     assert(N == 1 && "Invalid number of operands!");
2874     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2875     assert(CE && "non-constant post-idx-imm8 operand!");
2876     int Imm = CE->getValue();
2877     bool isAdd = Imm >= 0;
2878     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
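         // Bit 8 of the encoded offset is the add/subtract flag; the low 8 bits
         // hold the magnitude.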
2879     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2880     Inst.addOperand(MCOperand::createImm(Imm));
2881   }
2882 
2883   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2884     assert(N == 1 && "Invalid number of operands!");
2885     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2886     assert(CE && "non-constant post-idx-imm8s4 operand!");
2887     int Imm = CE->getValue();
2888     bool isAdd = Imm >= 0;
2889     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2890     // Immediate is scaled by 4.
2891     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2892     Inst.addOperand(MCOperand::createImm(Imm));
2893   }
2894 
2895   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2896     assert(N == 2 && "Invalid number of operands!");
2897     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2898     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2899   }
2900 
2901   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2902     assert(N == 2 && "Invalid number of operands!");
2903     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2904     // The sign, shift type, and shift amount are encoded in a single operand
2905     // using the AM2 encoding helpers.
2906     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2907     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2908                                      PostIdxReg.ShiftTy);
2909     Inst.addOperand(MCOperand::createImm(Imm));
2910   }
2911 
2912   void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
2913     assert(N == 1 && "Invalid number of operands!");
2914     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2915     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2916   }
2917 
2918   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2919     assert(N == 1 && "Invalid number of operands!");
2920     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2921   }
2922 
2923   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2924     assert(N == 1 && "Invalid number of operands!");
2925     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2926   }
2927 
2928   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2929     assert(N == 1 && "Invalid number of operands!");
2930     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2931   }
2932 
2933   void addVecListOperands(MCInst &Inst, unsigned N) const {
2934     assert(N == 1 && "Invalid number of operands!");
2935     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2936   }
2937 
2938   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2939     assert(N == 2 && "Invalid number of operands!");
2940     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2941     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2942   }
2943 
2944   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2945     assert(N == 1 && "Invalid number of operands!");
2946     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2947   }
2948 
2949   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2950     assert(N == 1 && "Invalid number of operands!");
2951     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2952   }
2953 
2954   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2955     assert(N == 1 && "Invalid number of operands!");
2956     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2957   }
2958 
2959   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
2960     assert(N == 1 && "Invalid number of operands!");
2961     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2962   }
2963 
2964   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2965     assert(N == 1 && "Invalid number of operands!");
2966     // The immediate encodes the type of constant as well as the value.
2967     // Mask in that this is an i8 splat.
2968     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2969     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2970   }
2971 
2972   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2973     assert(N == 1 && "Invalid number of operands!");
2974     // The immediate encodes the type of constant as well as the value.
2975     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2976     unsigned Value = CE->getValue();
2977     Value = ARM_AM::encodeNEONi16splat(Value);
2978     Inst.addOperand(MCOperand::createImm(Value));
2979   }
2980 
2981   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2982     assert(N == 1 && "Invalid number of operands!");
2983     // The immediate encodes the type of constant as well as the value.
2984     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2985     unsigned Value = CE->getValue();
2986     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
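         // This is the complemented form: e.g. a requested 0xfffc is passed on
         // as the encoding of ~0xfffc & 0xffff = 0x0003.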
2987     Inst.addOperand(MCOperand::createImm(Value));
2988   }
2989 
2990   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2991     assert(N == 1 && "Invalid number of operands!");
2992     // The immediate encodes the type of constant as well as the value.
2993     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2994     unsigned Value = CE->getValue();
2995     Value = ARM_AM::encodeNEONi32splat(Value);
2996     Inst.addOperand(MCOperand::createImm(Value));
2997   }
2998 
2999   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
3000     assert(N == 1 && "Invalid number of operands!");
3001     // The immediate encodes the type of constant as well as the value.
3002     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3003     unsigned Value = CE->getValue();
3004     Value = ARM_AM::encodeNEONi32splat(~Value);
3005     Inst.addOperand(MCOperand::createImm(Value));
3006   }
3007 
3008   void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
3009     // The immediate encodes the type of constant as well as the value.
3010     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3011     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
3012             Inst.getOpcode() == ARM::VMOVv16i8) &&
3013           "All instructions that want to replicate a non-zero byte "
3014           "must be replaced with VMOVv8i8 or VMOVv16i8.");
3015     unsigned Value = CE->getValue();
3016     if (Inv)
3017       Value = ~Value;
3018     unsigned B = Value & 0xff;
3019     B |= 0xe00; // cmode = 0b1110
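         // e.g. a replicated byte of 0xab (after any inversion) is emitted as 0xeab.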
3020     Inst.addOperand(MCOperand::createImm(B));
3021   }
3022 
3023   void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3024     assert(N == 1 && "Invalid number of operands!");
3025     addNEONi8ReplicateOperands(Inst, true);
3026   }
3027 
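       // Map a splat value to the combined NEON VMOV cmode/value encoding.
       // Worked examples, derived from the cases below: 0x0000ab00 -> 0x2ab,
       // 0x00ab0000 -> 0x4ab, 0xab000000 -> 0x6ab.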
3028   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
3029     if (Value >= 256 && Value <= 0xffff)
3030       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
3031     else if (Value > 0xffff && Value <= 0xffffff)
3032       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
3033     else if (Value > 0xffffff)
3034       Value = (Value >> 24) | 0x600;
3035     return Value;
3036   }
3037 
3038   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
3039     assert(N == 1 && "Invalid number of operands!");
3040     // The immediate encodes the type of constant as well as the value.
3041     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3042     unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
3043     Inst.addOperand(MCOperand::createImm(Value));
3044   }
3045 
3046   void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
3047     assert(N == 1 && "Invalid number of operands!");
3048     addNEONi8ReplicateOperands(Inst, false);
3049   }
3050 
3051   void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
3052     assert(N == 1 && "Invalid number of operands!");
3053     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3054     assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
3055             Inst.getOpcode() == ARM::VMOVv8i16 ||
3056             Inst.getOpcode() == ARM::VMVNv4i16 ||
3057             Inst.getOpcode() == ARM::VMVNv8i16) &&
3058           "All instructions that want to replicate a non-zero half-word "
3059           "must be replaced with V{MOV,MVN}v{4,8}i16.");
3060     uint64_t Value = CE->getValue();
3061     unsigned Elem = Value & 0xffff;
3062     if (Elem >= 256)
3063       Elem = (Elem >> 8) | 0x200;
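         // e.g. a requested 0x00ab stays 0xab, while 0xab00 becomes 0x2ab.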
3064     Inst.addOperand(MCOperand::createImm(Elem));
3065   }
3066 
3067   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
3068     assert(N == 1 && "Invalid number of operands!");
3069     // The immediate encodes the type of constant as well as the value.
3070     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3071     unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
3072     Inst.addOperand(MCOperand::createImm(Value));
3073   }
3074 
3075   void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
3076     assert(N == 1 && "Invalid number of operands!");
3077     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3078     assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
3079             Inst.getOpcode() == ARM::VMOVv4i32 ||
3080             Inst.getOpcode() == ARM::VMVNv2i32 ||
3081             Inst.getOpcode() == ARM::VMVNv4i32) &&
3082           "All instructions that want to replicate a non-zero word "
3083           "must be replaced with V{MOV,MVN}v{2,4}i32.");
3084     uint64_t Value = CE->getValue();
3085     unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
3086     Inst.addOperand(MCOperand::createImm(Elem));
3087   }
3088 
3089   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
3090     assert(N == 1 && "Invalid number of operands!");
3091     // The immediate encodes the type of constant as well as the value.
3092     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
3093     uint64_t Value = CE->getValue();
3094     unsigned Imm = 0;
3095     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
3096       Imm |= (Value & 1) << i;
3097     }
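         // Each byte of the i64 splat collapses to a single bit: e.g.
         // 0x00ff00ff00ff00ff becomes 0b01010101 = 0x55, emitted as 0x1e55.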
3098     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
3099   }
3100 
3101   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
3102     assert(N == 1 && "Invalid number of operands!");
3103     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
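         // Rotations of 0, 90, 180 and 270 degrees encode as 0-3 here.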
3104     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
3105   }
3106 
3107   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
3108     assert(N == 1 && "Invalid number of operands!");
3109     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
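         // Rotations of 90 and 270 degrees encode as 0 and 1 here.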
3110     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
3111   }
3112 
3113   void print(raw_ostream &OS) const override;
3114 
3115   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
3116     auto Op = make_unique<ARMOperand>(k_ITCondMask);
3117     Op->ITMask.Mask = Mask;
3118     Op->StartLoc = S;
3119     Op->EndLoc = S;
3120     return Op;
3121   }
3122 
3123   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
3124                                                     SMLoc S) {
3125     auto Op = make_unique<ARMOperand>(k_CondCode);
3126     Op->CC.Val = CC;
3127     Op->StartLoc = S;
3128     Op->EndLoc = S;
3129     return Op;
3130   }
3131 
3132   static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
3133                                                    SMLoc S) {
3134     auto Op = make_unique<ARMOperand>(k_VPTPred);
3135     Op->VCC.Val = CC;
3136     Op->StartLoc = S;
3137     Op->EndLoc = S;
3138     return Op;
3139   }
3140 
3141   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
3142     auto Op = make_unique<ARMOperand>(k_CoprocNum);
3143     Op->Cop.Val = CopVal;
3144     Op->StartLoc = S;
3145     Op->EndLoc = S;
3146     return Op;
3147   }
3148 
3149   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
3150     auto Op = make_unique<ARMOperand>(k_CoprocReg);
3151     Op->Cop.Val = CopVal;
3152     Op->StartLoc = S;
3153     Op->EndLoc = S;
3154     return Op;
3155   }
3156 
3157   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
3158                                                         SMLoc E) {
3159     auto Op = make_unique<ARMOperand>(k_CoprocOption);
3160     Op->Cop.Val = Val;
3161     Op->StartLoc = S;
3162     Op->EndLoc = E;
3163     return Op;
3164   }
3165 
3166   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
3167     auto Op = make_unique<ARMOperand>(k_CCOut);
3168     Op->Reg.RegNum = RegNum;
3169     Op->StartLoc = S;
3170     Op->EndLoc = S;
3171     return Op;
3172   }
3173 
3174   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
3175     auto Op = make_unique<ARMOperand>(k_Token);
3176     Op->Tok.Data = Str.data();
3177     Op->Tok.Length = Str.size();
3178     Op->StartLoc = S;
3179     Op->EndLoc = S;
3180     return Op;
3181   }
3182 
3183   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
3184                                                SMLoc E) {
3185     auto Op = make_unique<ARMOperand>(k_Register);
3186     Op->Reg.RegNum = RegNum;
3187     Op->StartLoc = S;
3188     Op->EndLoc = E;
3189     return Op;
3190   }
3191 
3192   static std::unique_ptr<ARMOperand>
3193   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3194                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
3195                         SMLoc E) {
3196     auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
3197     Op->RegShiftedReg.ShiftTy = ShTy;
3198     Op->RegShiftedReg.SrcReg = SrcReg;
3199     Op->RegShiftedReg.ShiftReg = ShiftReg;
3200     Op->RegShiftedReg.ShiftImm = ShiftImm;
3201     Op->StartLoc = S;
3202     Op->EndLoc = E;
3203     return Op;
3204   }
3205 
3206   static std::unique_ptr<ARMOperand>
3207   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
3208                          unsigned ShiftImm, SMLoc S, SMLoc E) {
3209     auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
3210     Op->RegShiftedImm.ShiftTy = ShTy;
3211     Op->RegShiftedImm.SrcReg = SrcReg;
3212     Op->RegShiftedImm.ShiftImm = ShiftImm;
3213     Op->StartLoc = S;
3214     Op->EndLoc = E;
3215     return Op;
3216   }
3217 
3218   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
3219                                                       SMLoc S, SMLoc E) {
3220     auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
3221     Op->ShifterImm.isASR = isASR;
3222     Op->ShifterImm.Imm = Imm;
3223     Op->StartLoc = S;
3224     Op->EndLoc = E;
3225     return Op;
3226   }
3227 
3228   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3229                                                   SMLoc E) {
3230     auto Op = make_unique<ARMOperand>(k_RotateImmediate);
3231     Op->RotImm.Imm = Imm;
3232     Op->StartLoc = S;
3233     Op->EndLoc = E;
3234     return Op;
3235   }
3236 
3237   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3238                                                   SMLoc S, SMLoc E) {
3239     auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
3240     Op->ModImm.Bits = Bits;
3241     Op->ModImm.Rot = Rot;
3242     Op->StartLoc = S;
3243     Op->EndLoc = E;
3244     return Op;
3245   }
3246 
3247   static std::unique_ptr<ARMOperand>
3248   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3249     auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
3250     Op->Imm.Val = Val;
3251     Op->StartLoc = S;
3252     Op->EndLoc = E;
3253     return Op;
3254   }
3255 
3256   static std::unique_ptr<ARMOperand>
3257   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3258     auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
3259     Op->Bitfield.LSB = LSB;
3260     Op->Bitfield.Width = Width;
3261     Op->StartLoc = S;
3262     Op->EndLoc = E;
3263     return Op;
3264   }
3265 
3266   static std::unique_ptr<ARMOperand>
3267   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3268                 SMLoc StartLoc, SMLoc EndLoc) {
3269     assert(Regs.size() > 0 && "RegList contains no registers?");
3270     KindTy Kind = k_RegisterList;
3271 
3272     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
3273             Regs.front().second)) {
3274       if (Regs.back().second == ARM::VPR)
3275         Kind = k_FPDRegisterListWithVPR;
3276       else
3277         Kind = k_DPRRegisterList;
3278     } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
3279                    Regs.front().second)) {
3280       if (Regs.back().second == ARM::VPR)
3281         Kind = k_FPSRegisterListWithVPR;
3282       else
3283         Kind = k_SPRRegisterList;
3284     }
3285 
3286     // Sort based on the register encoding values.
3287     array_pod_sort(Regs.begin(), Regs.end());
3288 
3289     if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
3290       Kind = k_RegisterListWithAPSR;
3291 
3292     auto Op = make_unique<ARMOperand>(Kind);
3293     for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
3294            I = Regs.begin(), E = Regs.end(); I != E; ++I)
3295       Op->Registers.push_back(I->second);
3296 
3297     Op->StartLoc = StartLoc;
3298     Op->EndLoc = EndLoc;
3299     return Op;
3300   }
3301 
3302   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3303                                                       unsigned Count,
3304                                                       bool isDoubleSpaced,
3305                                                       SMLoc S, SMLoc E) {
3306     auto Op = make_unique<ARMOperand>(k_VectorList);
3307     Op->VectorList.RegNum = RegNum;
3308     Op->VectorList.Count = Count;
3309     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3310     Op->StartLoc = S;
3311     Op->EndLoc = E;
3312     return Op;
3313   }
3314 
3315   static std::unique_ptr<ARMOperand>
3316   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3317                            SMLoc S, SMLoc E) {
3318     auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
3319     Op->VectorList.RegNum = RegNum;
3320     Op->VectorList.Count = Count;
3321     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3322     Op->StartLoc = S;
3323     Op->EndLoc = E;
3324     return Op;
3325   }
3326 
3327   static std::unique_ptr<ARMOperand>
3328   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3329                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
3330     auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
3331     Op->VectorList.RegNum = RegNum;
3332     Op->VectorList.Count = Count;
3333     Op->VectorList.LaneIndex = Index;
3334     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3335     Op->StartLoc = S;
3336     Op->EndLoc = E;
3337     return Op;
3338   }
3339 
3340   static std::unique_ptr<ARMOperand>
3341   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3342     auto Op = make_unique<ARMOperand>(k_VectorIndex);
3343     Op->VectorIndex.Val = Idx;
3344     Op->StartLoc = S;
3345     Op->EndLoc = E;
3346     return Op;
3347   }
3348 
3349   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3350                                                SMLoc E) {
3351     auto Op = make_unique<ARMOperand>(k_Immediate);
3352     Op->Imm.Val = Val;
3353     Op->StartLoc = S;
3354     Op->EndLoc = E;
3355     return Op;
3356   }
3357 
3358   static std::unique_ptr<ARMOperand>
3359   CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3360             unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3361             unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3362             SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3363     auto Op = make_unique<ARMOperand>(k_Memory);
3364     Op->Memory.BaseRegNum = BaseRegNum;
3365     Op->Memory.OffsetImm = OffsetImm;
3366     Op->Memory.OffsetRegNum = OffsetRegNum;
3367     Op->Memory.ShiftType = ShiftType;
3368     Op->Memory.ShiftImm = ShiftImm;
3369     Op->Memory.Alignment = Alignment;
3370     Op->Memory.isNegative = isNegative;
3371     Op->StartLoc = S;
3372     Op->EndLoc = E;
3373     Op->AlignmentLoc = AlignmentLoc;
3374     return Op;
3375   }
3376 
3377   static std::unique_ptr<ARMOperand>
3378   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3379                    unsigned ShiftImm, SMLoc S, SMLoc E) {
3380     auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
3381     Op->PostIdxReg.RegNum = RegNum;
3382     Op->PostIdxReg.isAdd = isAdd;
3383     Op->PostIdxReg.ShiftTy = ShiftTy;
3384     Op->PostIdxReg.ShiftImm = ShiftImm;
3385     Op->StartLoc = S;
3386     Op->EndLoc = E;
3387     return Op;
3388   }
3389 
3390   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3391                                                          SMLoc S) {
3392     auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3393     Op->MBOpt.Val = Opt;
3394     Op->StartLoc = S;
3395     Op->EndLoc = S;
3396     return Op;
3397   }
3398 
3399   static std::unique_ptr<ARMOperand>
3400   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3401     auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3402     Op->ISBOpt.Val = Opt;
3403     Op->StartLoc = S;
3404     Op->EndLoc = S;
3405     return Op;
3406   }
3407 
3408   static std::unique_ptr<ARMOperand>
3409   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3410     auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3411     Op->TSBOpt.Val = Opt;
3412     Op->StartLoc = S;
3413     Op->EndLoc = S;
3414     return Op;
3415   }
3416 
3417   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3418                                                       SMLoc S) {
3419     auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3420     Op->IFlags.Val = IFlags;
3421     Op->StartLoc = S;
3422     Op->EndLoc = S;
3423     return Op;
3424   }
3425 
3426   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3427     auto Op = make_unique<ARMOperand>(k_MSRMask);
3428     Op->MMask.Val = MMask;
3429     Op->StartLoc = S;
3430     Op->EndLoc = S;
3431     return Op;
3432   }
3433 
3434   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3435     auto Op = make_unique<ARMOperand>(k_BankedReg);
3436     Op->BankedReg.Val = Reg;
3437     Op->StartLoc = S;
3438     Op->EndLoc = S;
3439     return Op;
3440   }
3441 };
3442 
3443 } // end anonymous namespace.
3444 
3445 void ARMOperand::print(raw_ostream &OS) const {
3446   auto RegName = [](unsigned Reg) {
3447     if (Reg)
3448       return ARMInstPrinter::getRegisterName(Reg);
3449     else
3450       return "noreg";
3451   };
3452 
3453   switch (Kind) {
3454   case k_CondCode:
3455     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3456     break;
3457   case k_VPTPred:
3458     OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
3459     break;
3460   case k_CCOut:
3461     OS << "<ccout " << RegName(getReg()) << ">";
3462     break;
3463   case k_ITCondMask: {
3464     static const char *const MaskStr[] = {
3465       "(invalid)", "(tttt)", "(ttt)", "(ttte)",
3466       "(tt)",      "(ttet)", "(tte)", "(ttee)",
3467       "(t)",       "(tett)", "(tet)", "(tete)",
3468       "(te)",      "(teet)", "(tee)", "(teee)",
3469     };
3470     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3471     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3472     break;
3473   }
3474   case k_CoprocNum:
3475     OS << "<coprocessor number: " << getCoproc() << ">";
3476     break;
3477   case k_CoprocReg:
3478     OS << "<coprocessor register: " << getCoproc() << ">";
3479     break;
3480   case k_CoprocOption:
3481     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3482     break;
3483   case k_MSRMask:
3484     OS << "<mask: " << getMSRMask() << ">";
3485     break;
3486   case k_BankedReg:
3487     OS << "<banked reg: " << getBankedReg() << ">";
3488     break;
3489   case k_Immediate:
3490     OS << *getImm();
3491     break;
3492   case k_MemBarrierOpt:
3493     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3494     break;
3495   case k_InstSyncBarrierOpt:
3496     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3497     break;
3498   case k_TraceSyncBarrierOpt:
3499     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3500     break;
3501   case k_Memory:
3502     OS << "<memory";
3503     if (Memory.BaseRegNum)
3504       OS << " base:" << RegName(Memory.BaseRegNum);
3505     if (Memory.OffsetImm)
3506       OS << " offset-imm:" << *Memory.OffsetImm;
3507     if (Memory.OffsetRegNum)
3508       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3509          << RegName(Memory.OffsetRegNum);
3510     if (Memory.ShiftType != ARM_AM::no_shift) {
3511       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3512       OS << " shift-imm:" << Memory.ShiftImm;
3513     }
3514     if (Memory.Alignment)
3515       OS << " alignment:" << Memory.Alignment;
3516     OS << ">";
3517     break;
3518   case k_PostIndexRegister:
3519     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3520        << RegName(PostIdxReg.RegNum);
3521     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3522       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3523          << PostIdxReg.ShiftImm;
3524     OS << ">";
3525     break;
3526   case k_ProcIFlags: {
3527     OS << "<ARM_PROC::";
3528     unsigned IFlags = getProcIFlags();
3529     for (int i=2; i >= 0; --i)
3530       if (IFlags & (1 << i))
3531         OS << ARM_PROC::IFlagsToString(1 << i);
3532     OS << ">";
3533     break;
3534   }
3535   case k_Register:
3536     OS << "<register " << RegName(getReg()) << ">";
3537     break;
3538   case k_ShifterImmediate:
3539     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3540        << " #" << ShifterImm.Imm << ">";
3541     break;
3542   case k_ShiftedRegister:
3543     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3544        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3545        << RegName(RegShiftedReg.ShiftReg) << ">";
3546     break;
3547   case k_ShiftedImmediate:
3548     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3549        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3550        << RegShiftedImm.ShiftImm << ">";
3551     break;
3552   case k_RotateImmediate:
3553     OS << "<ror #" << (RotImm.Imm * 8) << ">";
3554     break;
3555   case k_ModifiedImmediate:
3556     OS << "<mod_imm #" << ModImm.Bits << ", #"
3557        << ModImm.Rot << ">";
3558     break;
3559   case k_ConstantPoolImmediate:
3560     OS << "<constant_pool_imm #" << *getConstantPoolImm() << ">";
3561     break;
3562   case k_BitfieldDescriptor:
3563     OS << "<bitfield " << "lsb: " << Bitfield.LSB
3564        << ", width: " << Bitfield.Width << ">";
3565     break;
3566   case k_RegisterList:
3567   case k_RegisterListWithAPSR:
3568   case k_DPRRegisterList:
3569   case k_SPRRegisterList:
3570   case k_FPSRegisterListWithVPR:
3571   case k_FPDRegisterListWithVPR: {
3572     OS << "<register_list ";
3573 
3574     const SmallVectorImpl<unsigned> &RegList = getRegList();
3575     for (SmallVectorImpl<unsigned>::const_iterator
3576            I = RegList.begin(), E = RegList.end(); I != E; ) {
3577       OS << RegName(*I);
3578       if (++I < E) OS << ", ";
3579     }
3580 
3581     OS << ">";
3582     break;
3583   }
3584   case k_VectorList:
3585     OS << "<vector_list " << VectorList.Count << " * "
3586        << RegName(VectorList.RegNum) << ">";
3587     break;
3588   case k_VectorListAllLanes:
3589     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3590        << RegName(VectorList.RegNum) << ">";
3591     break;
3592   case k_VectorListIndexed:
3593     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3594        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
3595     break;
3596   case k_Token:
3597     OS << "'" << getToken() << "'";
3598     break;
3599   case k_VectorIndex:
3600     OS << "<vectorindex " << getVectorIndex() << ">";
3601     break;
3602   }
3603 }
3604 
3605 /// @name Auto-generated Match Functions
3606 /// {
3607 
3608 static unsigned MatchRegisterName(StringRef Name);
3609 
3610 /// }
3611 
3612 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3613                                  SMLoc &StartLoc, SMLoc &EndLoc) {
3614   const AsmToken &Tok = getParser().getTok();
3615   StartLoc = Tok.getLoc();
3616   EndLoc = Tok.getEndLoc();
3617   RegNo = tryParseRegister();
3618 
3619   return (RegNo == (unsigned)-1);
3620 }
3621 
3622 /// Try to parse a register name.  The token must be an Identifier when called,
3623 /// and if it is a register name the token is eaten and the register number is
3624 /// returned.  Otherwise return -1.
3625 int ARMAsmParser::tryParseRegister() {
3626   MCAsmParser &Parser = getParser();
3627   const AsmToken &Tok = Parser.getTok();
3628   if (Tok.isNot(AsmToken::Identifier)) return -1;
3629 
3630   std::string lowerCase = Tok.getString().lower();
3631   unsigned RegNum = MatchRegisterName(lowerCase);
3632   if (!RegNum) {
3633     RegNum = StringSwitch<unsigned>(lowerCase)
3634       .Case("r13", ARM::SP)
3635       .Case("r14", ARM::LR)
3636       .Case("r15", ARM::PC)
3637       .Case("ip", ARM::R12)
3638       // Additional register name aliases for 'gas' compatibility.
3639       .Case("a1", ARM::R0)
3640       .Case("a2", ARM::R1)
3641       .Case("a3", ARM::R2)
3642       .Case("a4", ARM::R3)
3643       .Case("v1", ARM::R4)
3644       .Case("v2", ARM::R5)
3645       .Case("v3", ARM::R6)
3646       .Case("v4", ARM::R7)
3647       .Case("v5", ARM::R8)
3648       .Case("v6", ARM::R9)
3649       .Case("v7", ARM::R10)
3650       .Case("v8", ARM::R11)
3651       .Case("sb", ARM::R9)
3652       .Case("sl", ARM::R10)
3653       .Case("fp", ARM::R11)
3654       .Default(0);
3655   }
3656   if (!RegNum) {
3657     // Check for aliases registered via .req. Canonicalize to lower case.
3658     // That's more consistent since register names are case insensitive, and
3659     // it's how the original entry was passed in from MC/MCParser/AsmParser.
3660     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3661     // If no match, return failure.
3662     if (Entry == RegisterReqs.end())
3663       return -1;
3664     Parser.Lex(); // Eat identifier token.
3665     return Entry->getValue();
3666   }
3667 
3668   // Some FPUs only have 16 D registers, so D16-D31 are invalid
3669   if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3670     return -1;
3671 
3672   Parser.Lex(); // Eat identifier token.
3673 
3674   return RegNum;
3675 }
3676 
3677 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3678 // If a recoverable error occurs, return 1. If an irrecoverable error
3679 // occurs, return -1. An irrecoverable error is one where tokens have been
3680 // consumed in the process of trying to parse the shifter (i.e., when it is
3681 // indeed a shifter operand, but malformed).
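     // For example, this handles the shifter part of "r0, lsl #3", "r0, asr r2",
     // and the amount-less "r0, rrx".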
3682 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3683   MCAsmParser &Parser = getParser();
3684   SMLoc S = Parser.getTok().getLoc();
3685   const AsmToken &Tok = Parser.getTok();
3686   if (Tok.isNot(AsmToken::Identifier))
3687     return -1;
3688 
3689   std::string lowerCase = Tok.getString().lower();
3690   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
3691       .Case("asl", ARM_AM::lsl)
3692       .Case("lsl", ARM_AM::lsl)
3693       .Case("lsr", ARM_AM::lsr)
3694       .Case("asr", ARM_AM::asr)
3695       .Case("ror", ARM_AM::ror)
3696       .Case("rrx", ARM_AM::rrx)
3697       .Default(ARM_AM::no_shift);
3698 
3699   if (ShiftTy == ARM_AM::no_shift)
3700     return 1;
3701 
3702   Parser.Lex(); // Eat the operator.
3703 
3704   // The source register for the shift has already been added to the
3705   // operand list, so we need to pop it off and combine it into the shifted
3706   // register operand instead.
3707   std::unique_ptr<ARMOperand> PrevOp(
3708       (ARMOperand *)Operands.pop_back_val().release());
3709   if (!PrevOp->isReg())
3710     return Error(PrevOp->getStartLoc(), "shift must be of a register");
3711   int SrcReg = PrevOp->getReg();
3712 
3713   SMLoc EndLoc;
3714   int64_t Imm = 0;
3715   int ShiftReg = 0;
3716   if (ShiftTy == ARM_AM::rrx) {
3717     // RRX doesn't have an explicit shift amount. The encoder expects
3718     // the shift register to be the same as the source register. Seems odd,
3719     // but OK.
3720     ShiftReg = SrcReg;
3721   } else {
3722     // Figure out if this is shifted by a constant or a register (for non-RRX).
3723     if (Parser.getTok().is(AsmToken::Hash) ||
3724         Parser.getTok().is(AsmToken::Dollar)) {
3725       Parser.Lex(); // Eat the hash or dollar.
3726       SMLoc ImmLoc = Parser.getTok().getLoc();
3727       const MCExpr *ShiftExpr = nullptr;
3728       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3729         Error(ImmLoc, "invalid immediate shift value");
3730         return -1;
3731       }
3732       // The expression must be evaluatable as an immediate.
3733       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3734       if (!CE) {
3735         Error(ImmLoc, "invalid immediate shift value");
3736         return -1;
3737       }
3738       // Range check the immediate.
3739       // lsl, ror: 0 <= imm <= 31
3740       // lsr, asr: 0 <= imm <= 32
3741       Imm = CE->getValue();
3742       if (Imm < 0 ||
3743           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3744           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3745         Error(ImmLoc, "immediate shift value out of range");
3746         return -1;
3747       }
3748       // shift by zero is a nop. Always send it through as lsl.
3749       // ('as' compatibility)
3750       if (Imm == 0)
3751         ShiftTy = ARM_AM::lsl;
3752     } else if (Parser.getTok().is(AsmToken::Identifier)) {
3753       SMLoc L = Parser.getTok().getLoc();
3754       EndLoc = Parser.getTok().getEndLoc();
3755       ShiftReg = tryParseRegister();
3756       if (ShiftReg == -1) {
3757         Error(L, "expected immediate or register in shift operand");
3758         return -1;
3759       }
3760     } else {
3761       Error(Parser.getTok().getLoc(),
3762             "expected immediate or register in shift operand");
3763       return -1;
3764     }
3765   }
3766 
3767   if (ShiftReg && ShiftTy != ARM_AM::rrx)
3768     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3769                                                          ShiftReg, Imm,
3770                                                          S, EndLoc));
3771   else
3772     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3773                                                           S, EndLoc));
3774 
3775   return 0;
3776 }
3777 
3778 /// Try to parse a register name.  The token must be an Identifier when called.
3779 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3780 /// if there is a "writeback". Returns 'true' if it's not a register.
3781 ///
3782 /// TODO: this is likely to change to allow different register types and/or to
3783 /// parse for a specific register type.
3784 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3785   MCAsmParser &Parser = getParser();
3786   SMLoc RegStartLoc = Parser.getTok().getLoc();
3787   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3788   int RegNo = tryParseRegister();
3789   if (RegNo == -1)
3790     return true;
3791 
3792   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3793 
3794   const AsmToken &ExclaimTok = Parser.getTok();
3795   if (ExclaimTok.is(AsmToken::Exclaim)) {
3796     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3797                                                ExclaimTok.getLoc()));
3798     Parser.Lex(); // Eat exclaim token
3799     return false;
3800   }
3801 
3802   // Also check for an index operand. This is only legal for vector registers,
3803   // but that'll get caught OK in operand matching, so we don't need to
3804   // explicitly filter everything else out here.
3805   if (Parser.getTok().is(AsmToken::LBrac)) {
3806     SMLoc SIdx = Parser.getTok().getLoc();
3807     Parser.Lex(); // Eat left bracket token.
3808 
3809     const MCExpr *ImmVal;
3810     if (getParser().parseExpression(ImmVal))
3811       return true;
3812     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3813     if (!MCE)
3814       return TokError("immediate value expected for vector index");
3815 
3816     if (Parser.getTok().isNot(AsmToken::RBrac))
3817       return Error(Parser.getTok().getLoc(), "']' expected");
3818 
3819     SMLoc E = Parser.getTok().getEndLoc();
3820     Parser.Lex(); // Eat right bracket token.
3821 
3822     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3823                                                      SIdx, E,
3824                                                      getContext()));
3825   }
3826 
3827   return false;
3828 }
3829 
3830 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3831 /// instruction's symbolic operand name.
3832 /// We accept "crN" syntax for GAS compatibility.
3833 /// <operand-name> ::= <prefix><number>
3834 /// If CoprocOp is 'c', then:
3835 ///   <prefix> ::= c | cr
3836 /// If CoprocOp is 'p', then :
3837 ///   <prefix> ::= p
3838 /// <number> ::= integer in range [0, 15]
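     /// e.g. MatchCoprocessorOperandName("cr15", 'c') and ("c15", 'c') both
     /// return 15, and MatchCoprocessorOperandName("p7", 'p') returns 7.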
3839 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3840   // Use the same layout as the tablegen'erated register name matcher. Ugly,
3841   // but efficient.
3842   if (Name.size() < 2 || Name[0] != CoprocOp)
3843     return -1;
3844   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3845 
3846   switch (Name.size()) {
3847   default: return -1;
3848   case 1:
3849     switch (Name[0]) {
3850     default:  return -1;
3851     case '0': return 0;
3852     case '1': return 1;
3853     case '2': return 2;
3854     case '3': return 3;
3855     case '4': return 4;
3856     case '5': return 5;
3857     case '6': return 6;
3858     case '7': return 7;
3859     case '8': return 8;
3860     case '9': return 9;
3861     }
3862   case 2:
3863     if (Name[0] != '1')
3864       return -1;
3865     switch (Name[1]) {
3866     default:  return -1;
3867     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3868     // However, old cores (v5/v6) did use them in that way.
3869     case '0': return 10;
3870     case '1': return 11;
3871     case '2': return 12;
3872     case '3': return 13;
3873     case '4': return 14;
3874     case '5': return 15;
3875     }
3876   }
3877 }
3878 
3879 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3880 OperandMatchResultTy
3881 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3882   MCAsmParser &Parser = getParser();
3883   SMLoc S = Parser.getTok().getLoc();
3884   const AsmToken &Tok = Parser.getTok();
3885   if (!Tok.is(AsmToken::Identifier))
3886     return MatchOperand_NoMatch;
3887   unsigned CC = ARMCondCodeFromString(Tok.getString());
3888   if (CC == ~0U)
3889     return MatchOperand_NoMatch;
3890   Parser.Lex(); // Eat the token.
3891 
3892   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3893 
3894   return MatchOperand_Success;
3895 }
3896 
3897 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3898 /// token must be an Identifier when called, and if it is a coprocessor
3899 /// number, the token is eaten and the operand is added to the operand list.
3900 OperandMatchResultTy
3901 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3902   MCAsmParser &Parser = getParser();
3903   SMLoc S = Parser.getTok().getLoc();
3904   const AsmToken &Tok = Parser.getTok();
3905   if (Tok.isNot(AsmToken::Identifier))
3906     return MatchOperand_NoMatch;
3907 
3908   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
3909   if (Num == -1)
3910     return MatchOperand_NoMatch;
3911   // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3912   if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3913     return MatchOperand_NoMatch;
3914 
3915   Parser.Lex(); // Eat identifier token.
3916   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3917   return MatchOperand_Success;
3918 }
3919 
3920 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
3921 /// token must be an Identifier when called, and if it is a coprocessor
3922 /// register, the token is eaten and the operand is added to the operand list.
3923 OperandMatchResultTy
3924 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3925   MCAsmParser &Parser = getParser();
3926   SMLoc S = Parser.getTok().getLoc();
3927   const AsmToken &Tok = Parser.getTok();
3928   if (Tok.isNot(AsmToken::Identifier))
3929     return MatchOperand_NoMatch;
3930 
3931   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
3932   if (Reg == -1)
3933     return MatchOperand_NoMatch;
3934 
3935   Parser.Lex(); // Eat identifier token.
3936   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3937   return MatchOperand_Success;
3938 }
3939 
3940 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3941 /// coproc_option : '{' imm0_255 '}'
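     /// e.g. the trailing "{25}" in "ldc p14, c5, [r1], {25}".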
3942 OperandMatchResultTy
3943 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3944   MCAsmParser &Parser = getParser();
3945   SMLoc S = Parser.getTok().getLoc();
3946 
3947   // If this isn't a '{', this isn't a coprocessor immediate operand.
3948   if (Parser.getTok().isNot(AsmToken::LCurly))
3949     return MatchOperand_NoMatch;
3950   Parser.Lex(); // Eat the '{'
3951 
3952   const MCExpr *Expr;
3953   SMLoc Loc = Parser.getTok().getLoc();
3954   if (getParser().parseExpression(Expr)) {
3955     Error(Loc, "illegal expression");
3956     return MatchOperand_ParseFail;
3957   }
3958   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3959   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3960     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3961     return MatchOperand_ParseFail;
3962   }
3963   int Val = CE->getValue();
3964 
3965   // Check for and consume the closing '}'
3966   if (Parser.getTok().isNot(AsmToken::RCurly))
3967     return MatchOperand_ParseFail;
3968   SMLoc E = Parser.getTok().getEndLoc();
3969   Parser.Lex(); // Eat the '}'
3970 
3971   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3972   return MatchOperand_Success;
3973 }
3974 
3975 // For register list parsing, we need to map from raw GPR register numbering
3976 // to the enumeration values. The enumeration values aren't sorted by
3977 // register number due to our using "sp", "lr" and "pc" as canonical names.
3978 static unsigned getNextRegister(unsigned Reg) {
3979   // If this is a GPR, we need to do it manually, otherwise we can rely
3980   // on the sort ordering of the enumeration since the other reg-classes
3981   // are sane.
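       // e.g. getNextRegister(ARM::R12) is ARM::SP, and ARM::PC wraps back to
       // ARM::R0.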
3982   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3983     return Reg + 1;
3984   switch(Reg) {
3985   default: llvm_unreachable("Invalid GPR number!");
3986   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3987   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3988   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3989   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3990   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3991   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3992   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3993   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3994   }
3995 }
3996 
3997 /// Parse a register list.
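     /// e.g. "{r0, r4-r7, lr}" for GPRs or "{d0-d3}" for VFP D registers.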
3998 bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
3999                                      bool EnforceOrder) {
4000   MCAsmParser &Parser = getParser();
4001   if (Parser.getTok().isNot(AsmToken::LCurly))
4002     return TokError("Token is not a Left Curly Brace");
4003   SMLoc S = Parser.getTok().getLoc();
4004   Parser.Lex(); // Eat '{' token.
4005   SMLoc RegLoc = Parser.getTok().getLoc();
4006 
4007   // Check the first register in the list to see what register class
4008   // this is a list of.
4009   int Reg = tryParseRegister();
4010   if (Reg == -1)
4011     return Error(RegLoc, "register expected");
4012 
4013   // The reglist instructions have at most 16 registers, so reserve
4014   // space for that many.
4015   int EReg = 0;
4016   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
4017 
4018   // Allow Q regs and just interpret them as the two D sub-registers.
4019   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4020     Reg = getDRegFromQReg(Reg);
4021     EReg = MRI->getEncodingValue(Reg);
4022     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4023     ++Reg;
4024   }
4025   const MCRegisterClass *RC;
4026   if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4027     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
4028   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
4029     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
4030   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
4031     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
4032   else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4033     RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4034   else
4035     return Error(RegLoc, "invalid register in register list");
4036 
4037   // Store the register.
4038   EReg = MRI->getEncodingValue(Reg);
4039   Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4040 
4041   // This starts immediately after the first register token in the list,
4042   // so we can see either a comma or a minus (range separator) as a legal
4043   // next token.
4044   while (Parser.getTok().is(AsmToken::Comma) ||
4045          Parser.getTok().is(AsmToken::Minus)) {
4046     if (Parser.getTok().is(AsmToken::Minus)) {
4047       Parser.Lex(); // Eat the minus.
4048       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4049       int EndReg = tryParseRegister();
4050       if (EndReg == -1)
4051         return Error(AfterMinusLoc, "register expected");
4052       // Allow Q regs and just interpret them as the two D sub-registers.
4053       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4054         EndReg = getDRegFromQReg(EndReg) + 1;
4055       // If the register is the same as the start reg, there's nothing
4056       // more to do.
4057       if (Reg == EndReg)
4058         continue;
4059       // The register must be in the same register class as the first.
4060       if (!RC->contains(EndReg))
4061         return Error(AfterMinusLoc, "invalid register in register list");
4062       // Ranges must go from low to high.
4063       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
4064         return Error(AfterMinusLoc, "bad range in register list");
4065 
4066       // Add all the registers in the range to the register list.
4067       while (Reg != EndReg) {
4068         Reg = getNextRegister(Reg);
4069         EReg = MRI->getEncodingValue(Reg);
4070         Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4071       }
4072       continue;
4073     }
4074     Parser.Lex(); // Eat the comma.
4075     RegLoc = Parser.getTok().getLoc();
4076     int OldReg = Reg;
4077     const AsmToken RegTok = Parser.getTok();
4078     Reg = tryParseRegister();
4079     if (Reg == -1)
4080       return Error(RegLoc, "register expected");
4081     // Allow Q regs and just interpret them as the two D sub-registers.
4082     bool isQReg = false;
4083     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4084       Reg = getDRegFromQReg(Reg);
4085       isQReg = true;
4086     }
4087     if (!RC->contains(Reg) &&
4088         RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
4089         ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
4090       // Switch the register classes, as GPRwithAPSRnospRegClassID is mostly a
4091       // subset of GPRRegClassID, except that it also contains APSR.
4092       RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
4093     }
4094     if (Reg == ARM::VPR && (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
4095                             RC == &ARMMCRegisterClasses[ARM::DPRRegClassID])) {
4096       RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
4097       EReg = MRI->getEncodingValue(Reg);
4098       Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4099       continue;
4100     }
4101     // The register must be in the same register class as the first.
4102     if (!RC->contains(Reg))
4103       return Error(RegLoc, "invalid register in register list");
4104     // In most cases, the list must be monotonically increasing. An
4105     // exception is CLRM, which is order-independent anyway, so
4106     // there's no potential for confusion if you write clrm {r2,r1}
4107     // instead of clrm {r1,r2}.
4108     if (EnforceOrder &&
4109         MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
4110       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
4111         Warning(RegLoc, "register list not in ascending order");
4112       else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
4113         return Error(RegLoc, "register list not in ascending order");
4114     }
4115     if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
4116       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
4117               ") in register list");
4118       continue;
4119     }
4120     // VFP register lists must also be contiguous.
4121     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
4122         RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
4123         Reg != OldReg + 1)
4124       return Error(RegLoc, "non-contiguous register range");
4125     EReg = MRI->getEncodingValue(Reg);
4126     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4127     if (isQReg) {
4128       EReg = MRI->getEncodingValue(++Reg);
4129       Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
4130     }
4131   }
4132 
4133   if (Parser.getTok().isNot(AsmToken::RCurly))
4134     return Error(Parser.getTok().getLoc(), "'}' expected");
4135   SMLoc E = Parser.getTok().getEndLoc();
4136   Parser.Lex(); // Eat '}' token.
4137 
4138   // Push the register list operand.
4139   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
4140 
4141   // The ARM system instruction variants for LDM/STM have a '^' token here.
4142   if (Parser.getTok().is(AsmToken::Caret)) {
4143     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
4144     Parser.Lex(); // Eat '^' token.
4145   }
4146 
4147   return false;
4148 }
4149 
4150 // Helper function to parse the lane index for vector lists.
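     // Called once the register itself has been parsed: "[]" selects all lanes,
     // "[2]" selects lane 2, and the absence of brackets means no lane specifier.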
4151 OperandMatchResultTy ARMAsmParser::
4152 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
4153   MCAsmParser &Parser = getParser();
4154   Index = 0; // Always return a defined index value.
4155   if (Parser.getTok().is(AsmToken::LBrac)) {
4156     Parser.Lex(); // Eat the '['.
4157     if (Parser.getTok().is(AsmToken::RBrac)) {
4158       // "Dn[]" is the 'all lanes' syntax.
4159       LaneKind = AllLanes;
4160       EndLoc = Parser.getTok().getEndLoc();
4161       Parser.Lex(); // Eat the ']'.
4162       return MatchOperand_Success;
4163     }
4164 
4165     // There's an optional '#' token here. Normally there wouldn't be, but
4166     // inline assembly puts one in, and it's friendly to accept that.
4167     if (Parser.getTok().is(AsmToken::Hash))
4168       Parser.Lex(); // Eat the '#'.
4169 
4170     const MCExpr *LaneIndex;
4171     SMLoc Loc = Parser.getTok().getLoc();
4172     if (getParser().parseExpression(LaneIndex)) {
4173       Error(Loc, "illegal expression");
4174       return MatchOperand_ParseFail;
4175     }
4176     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
4177     if (!CE) {
4178       Error(Loc, "lane index must be empty or an integer");
4179       return MatchOperand_ParseFail;
4180     }
4181     if (Parser.getTok().isNot(AsmToken::RBrac)) {
4182       Error(Parser.getTok().getLoc(), "']' expected");
4183       return MatchOperand_ParseFail;
4184     }
4185     EndLoc = Parser.getTok().getEndLoc();
4186     Parser.Lex(); // Eat the ']'.
4187     int64_t Val = CE->getValue();
4188 
4189     // FIXME: Make this range check context sensitive for .8, .16, .32.
4190     if (Val < 0 || Val > 7) {
4191       Error(Parser.getTok().getLoc(), "lane index out of range");
4192       return MatchOperand_ParseFail;
4193     }
4194     Index = Val;
4195     LaneKind = IndexedLane;
4196     return MatchOperand_Success;
4197   }
4198   LaneKind = NoLanes;
4199   return MatchOperand_Success;
4200 }
4201 
4202 // parse a vector register list
4203 OperandMatchResultTy
4204 ARMAsmParser::parseVectorList(OperandVector &Operands) {
4205   MCAsmParser &Parser = getParser();
4206   VectorLaneTy LaneKind;
4207   unsigned LaneIndex;
4208   SMLoc S = Parser.getTok().getLoc();
4209   // As an extension (to match gas), support a plain D register or Q register
4210   // (without enclosing curly braces) as a single or double entry list,
4211   // respectively.
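       // e.g. a bare "d1" is treated like "{d1}" and a bare "q1" like "{d2, d3}".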
4212   if (Parser.getTok().is(AsmToken::Identifier)) {
4213     SMLoc E = Parser.getTok().getEndLoc();
4214     int Reg = tryParseRegister();
4215     if (Reg == -1)
4216       return MatchOperand_NoMatch;
4217     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
4218       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4219       if (Res != MatchOperand_Success)
4220         return Res;
4221       switch (LaneKind) {
4222       case NoLanes:
4223         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
4224         break;
4225       case AllLanes:
4226         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
4227                                                                 S, E));
4228         break;
4229       case IndexedLane:
4230         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
4231                                                                LaneIndex,
4232                                                                false, S, E));
4233         break;
4234       }
4235       return MatchOperand_Success;
4236     }
4237     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4238       Reg = getDRegFromQReg(Reg);
4239       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
4240       if (Res != MatchOperand_Success)
4241         return Res;
4242       switch (LaneKind) {
4243       case NoLanes:
4244         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4245                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4246         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
4247         break;
4248       case AllLanes:
4249         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
4250                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
4251         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
4252                                                                 S, E));
4253         break;
4254       case IndexedLane:
4255         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
4256                                                                LaneIndex,
4257                                                                false, S, E));
4258         break;
4259       }
4260       return MatchOperand_Success;
4261     }
4262     Error(S, "vector register expected");
4263     return MatchOperand_ParseFail;
4264   }
4265 
4266   if (Parser.getTok().isNot(AsmToken::LCurly))
4267     return MatchOperand_NoMatch;
4268 
4269   Parser.Lex(); // Eat '{' token.
4270   SMLoc RegLoc = Parser.getTok().getLoc();
4271 
4272   int Reg = tryParseRegister();
4273   if (Reg == -1) {
4274     Error(RegLoc, "register expected");
4275     return MatchOperand_ParseFail;
4276   }
4277   unsigned Count = 1;
4278   int Spacing = 0;
4279   unsigned FirstReg = Reg;
4280   // The list is of D registers, but we also allow Q regs and just interpret
4281   // them as the two D sub-registers.
4282   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4283     FirstReg = Reg = getDRegFromQReg(Reg);
4284     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4285                  // it's ambiguous with four-register single spaced.
4286     ++Reg;
4287     ++Count;
4288   }
4289 
4290   SMLoc E;
4291   if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4292     return MatchOperand_ParseFail;
4293 
4294   while (Parser.getTok().is(AsmToken::Comma) ||
4295          Parser.getTok().is(AsmToken::Minus)) {
4296     if (Parser.getTok().is(AsmToken::Minus)) {
4297       if (!Spacing)
4298         Spacing = 1; // Register range implies a single spaced list.
4299       else if (Spacing == 2) {
4300         Error(Parser.getTok().getLoc(),
4301               "sequential registers in double spaced list");
4302         return MatchOperand_ParseFail;
4303       }
4304       Parser.Lex(); // Eat the minus.
4305       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4306       int EndReg = tryParseRegister();
4307       if (EndReg == -1) {
4308         Error(AfterMinusLoc, "register expected");
4309         return MatchOperand_ParseFail;
4310       }
4311       // Allow Q regs and just interpret them as the two D sub-registers.
4312       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4313         EndReg = getDRegFromQReg(EndReg) + 1;
4314       // If the register is the same as the start reg, there's nothing
4315       // more to do.
4316       if (Reg == EndReg)
4317         continue;
4318       // The register must be in the same register class as the first.
4319       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
4320         Error(AfterMinusLoc, "invalid register in register list");
4321         return MatchOperand_ParseFail;
4322       }
4323       // Ranges must go from low to high.
4324       if (Reg > EndReg) {
4325         Error(AfterMinusLoc, "bad range in register list");
4326         return MatchOperand_ParseFail;
4327       }
4328       // Parse the lane specifier if present.
4329       VectorLaneTy NextLaneKind;
4330       unsigned NextLaneIndex;
4331       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4332           MatchOperand_Success)
4333         return MatchOperand_ParseFail;
4334       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4335         Error(AfterMinusLoc, "mismatched lane index in register list");
4336         return MatchOperand_ParseFail;
4337       }
4338 
4339       // Add all the registers in the range to the register list.
4340       Count += EndReg - Reg;
4341       Reg = EndReg;
4342       continue;
4343     }
4344     Parser.Lex(); // Eat the comma.
4345     RegLoc = Parser.getTok().getLoc();
4346     int OldReg = Reg;
4347     Reg = tryParseRegister();
4348     if (Reg == -1) {
4349       Error(RegLoc, "register expected");
4350       return MatchOperand_ParseFail;
4351     }
4352     // Vector register lists must be contiguous.
4353     // It's OK to use the enumeration values directly here, as the VFP
4354     // register classes have the enum sorted properly.
4355     //
4356     // The list is of D registers, but we also allow Q regs and just interpret
4357     // them as the two D sub-registers.
4358     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4359       if (!Spacing)
4360         Spacing = 1; // Register range implies a single spaced list.
4361       else if (Spacing == 2) {
4362         Error(RegLoc,
4363               "invalid register in double-spaced list (must be 'D' register)");
4364         return MatchOperand_ParseFail;
4365       }
4366       Reg = getDRegFromQReg(Reg);
4367       if (Reg != OldReg + 1) {
4368         Error(RegLoc, "non-contiguous register range");
4369         return MatchOperand_ParseFail;
4370       }
4371       ++Reg;
4372       Count += 2;
4373       // Parse the lane specifier if present.
4374       VectorLaneTy NextLaneKind;
4375       unsigned NextLaneIndex;
4376       SMLoc LaneLoc = Parser.getTok().getLoc();
4377       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4378           MatchOperand_Success)
4379         return MatchOperand_ParseFail;
4380       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4381         Error(LaneLoc, "mismatched lane index in register list");
4382         return MatchOperand_ParseFail;
4383       }
4384       continue;
4385     }
4386     // Normal D register.
4387     // Figure out the register spacing (single or double) of the list if
4388     // we don't know it already.
4389     if (!Spacing)
4390       Spacing = 1 + (Reg == OldReg + 2);
4391 
4392     // Just check that it's contiguous and keep going.
4393     if (Reg != OldReg + Spacing) {
4394       Error(RegLoc, "non-contiguous register range");
4395       return MatchOperand_ParseFail;
4396     }
4397     ++Count;
4398     // Parse the lane specifier if present.
4399     VectorLaneTy NextLaneKind;
4400     unsigned NextLaneIndex;
4401     SMLoc EndLoc = Parser.getTok().getLoc();
4402     if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4403       return MatchOperand_ParseFail;
4404     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4405       Error(EndLoc, "mismatched lane index in register list");
4406       return MatchOperand_ParseFail;
4407     }
4408   }
4409 
4410   if (Parser.getTok().isNot(AsmToken::RCurly)) {
4411     Error(Parser.getTok().getLoc(), "'}' expected");
4412     return MatchOperand_ParseFail;
4413   }
4414   E = Parser.getTok().getEndLoc();
4415   Parser.Lex(); // Eat '}' token.
4416 
4417   switch (LaneKind) {
4418   case NoLanes:
4419     // Two-register operands have been converted to the
4420     // composite register classes.
4421     if (Count == 2) {
4422       const MCRegisterClass *RC = (Spacing == 1) ?
4423         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4424         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4425       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4426     }
4427     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
4428                                                     (Spacing == 2), S, E));
4429     break;
4430   case AllLanes:
4431     // Two-register operands have been converted to the
4432     // composite register classes.
4433     if (Count == 2) {
4434       const MCRegisterClass *RC = (Spacing == 1) ?
4435         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4436         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4437       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4438     }
4439     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
4440                                                             (Spacing == 2),
4441                                                             S, E));
4442     break;
4443   case IndexedLane:
4444     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4445                                                            LaneIndex,
4446                                                            (Spacing == 2),
4447                                                            S, E));
4448     break;
4449   }
4450   return MatchOperand_Success;
4451 }
4452 
4453 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
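/// For example (illustrative, not exhaustive): "dmb ish", "dsb sy", or
/// "dmb #10" (a raw 4-bit option value).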
4454 OperandMatchResultTy
4455 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4456   MCAsmParser &Parser = getParser();
4457   SMLoc S = Parser.getTok().getLoc();
4458   const AsmToken &Tok = Parser.getTok();
4459   unsigned Opt;
4460 
4461   if (Tok.is(AsmToken::Identifier)) {
4462     StringRef OptStr = Tok.getString();
4463 
4464     Opt = StringSwitch<unsigned>(OptStr.lower())
4465       .Case("sy",    ARM_MB::SY)
4466       .Case("st",    ARM_MB::ST)
4467       .Case("ld",    ARM_MB::LD)
4468       .Case("sh",    ARM_MB::ISH)
4469       .Case("ish",   ARM_MB::ISH)
4470       .Case("shst",  ARM_MB::ISHST)
4471       .Case("ishst", ARM_MB::ISHST)
4472       .Case("ishld", ARM_MB::ISHLD)
4473       .Case("nsh",   ARM_MB::NSH)
4474       .Case("un",    ARM_MB::NSH)
4475       .Case("nshst", ARM_MB::NSHST)
4476       .Case("nshld", ARM_MB::NSHLD)
4477       .Case("unst",  ARM_MB::NSHST)
4478       .Case("osh",   ARM_MB::OSH)
4479       .Case("oshst", ARM_MB::OSHST)
4480       .Case("oshld", ARM_MB::OSHLD)
4481       .Default(~0U);
4482 
4483     // ishld, oshld, nshld and ld are only available from ARMv8.
4484     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4485                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4486       Opt = ~0U;
4487 
4488     if (Opt == ~0U)
4489       return MatchOperand_NoMatch;
4490 
4491     Parser.Lex(); // Eat identifier token.
4492   } else if (Tok.is(AsmToken::Hash) ||
4493              Tok.is(AsmToken::Dollar) ||
4494              Tok.is(AsmToken::Integer)) {
4495     if (Parser.getTok().isNot(AsmToken::Integer))
4496       Parser.Lex(); // Eat '#' or '$'.
4497     SMLoc Loc = Parser.getTok().getLoc();
4498 
4499     const MCExpr *MemBarrierID;
4500     if (getParser().parseExpression(MemBarrierID)) {
4501       Error(Loc, "illegal expression");
4502       return MatchOperand_ParseFail;
4503     }
4504 
4505     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4506     if (!CE) {
4507       Error(Loc, "constant expression expected");
4508       return MatchOperand_ParseFail;
4509     }
4510 
4511     int Val = CE->getValue();
4512     if (Val & ~0xf) {
4513       Error(Loc, "immediate value out of range");
4514       return MatchOperand_ParseFail;
4515     }
4516 
4517     Opt = ARM_MB::RESERVED_0 + Val;
4518   } else
4519     return MatchOperand_ParseFail;
4520 
4521   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4522   return MatchOperand_Success;
4523 }
4524 
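/// parseTraceSyncBarrierOptOperand - Try to parse the CSYNC option of the TSB
/// instruction, e.g. "tsb csync" (illustrative).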
4525 OperandMatchResultTy
4526 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4527   MCAsmParser &Parser = getParser();
4528   SMLoc S = Parser.getTok().getLoc();
4529   const AsmToken &Tok = Parser.getTok();
4530 
4531   if (Tok.isNot(AsmToken::Identifier))
4532      return MatchOperand_NoMatch;
4533 
4534   if (!Tok.getString().equals_lower("csync"))
4535     return MatchOperand_NoMatch;
4536 
4537   Parser.Lex(); // Eat identifier token.
4538 
4539   Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4540   return MatchOperand_Success;
4541 }
4542 
4543 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
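/// For example (illustrative): "isb sy" or "isb #15" (a raw 4-bit option
/// value).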
4544 OperandMatchResultTy
4545 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4546   MCAsmParser &Parser = getParser();
4547   SMLoc S = Parser.getTok().getLoc();
4548   const AsmToken &Tok = Parser.getTok();
4549   unsigned Opt;
4550 
4551   if (Tok.is(AsmToken::Identifier)) {
4552     StringRef OptStr = Tok.getString();
4553 
4554     if (OptStr.equals_lower("sy"))
4555       Opt = ARM_ISB::SY;
4556     else
4557       return MatchOperand_NoMatch;
4558 
4559     Parser.Lex(); // Eat identifier token.
4560   } else if (Tok.is(AsmToken::Hash) ||
4561              Tok.is(AsmToken::Dollar) ||
4562              Tok.is(AsmToken::Integer)) {
4563     if (Parser.getTok().isNot(AsmToken::Integer))
4564       Parser.Lex(); // Eat '#' or '$'.
4565     SMLoc Loc = Parser.getTok().getLoc();
4566 
4567     const MCExpr *ISBarrierID;
4568     if (getParser().parseExpression(ISBarrierID)) {
4569       Error(Loc, "illegal expression");
4570       return MatchOperand_ParseFail;
4571     }
4572 
4573     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4574     if (!CE) {
4575       Error(Loc, "constant expression expected");
4576       return MatchOperand_ParseFail;
4577     }
4578 
4579     int Val = CE->getValue();
4580     if (Val & ~0xf) {
4581       Error(Loc, "immediate value out of range");
4582       return MatchOperand_ParseFail;
4583     }
4584 
4585     Opt = ARM_ISB::RESERVED_0 + Val;
4586   } else
4587     return MatchOperand_ParseFail;
4588 
4589   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4590           (ARM_ISB::InstSyncBOpt)Opt, S));
4591   return MatchOperand_Success;
4592 }
4593 
4594 
4595 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
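/// For example (illustrative): "cpsie if" or "cpsid aif". The string "none"
/// is also accepted, meaning no AIF bits are set.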
4596 OperandMatchResultTy
4597 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4598   MCAsmParser &Parser = getParser();
4599   SMLoc S = Parser.getTok().getLoc();
4600   const AsmToken &Tok = Parser.getTok();
4601   if (!Tok.is(AsmToken::Identifier))
4602     return MatchOperand_NoMatch;
4603   StringRef IFlagsStr = Tok.getString();
4604 
4605   // An iflags string of "none" is interpreted to mean that none of the AIF
4606   // bits are set.  Not a terribly useful instruction, but a valid encoding.
4607   unsigned IFlags = 0;
4608   if (IFlagsStr != "none") {
4609     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4610       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4611         .Case("a", ARM_PROC::A)
4612         .Case("i", ARM_PROC::I)
4613         .Case("f", ARM_PROC::F)
4614         .Default(~0U);
4615 
4616       // If some specific iflag is already set, it means that some letter is
4617       // present more than once; this is not acceptable.
4618       if (Flag == ~0U || (IFlags & Flag))
4619         return MatchOperand_NoMatch;
4620 
4621       IFlags |= Flag;
4622     }
4623   }
4624 
4625   Parser.Lex(); // Eat identifier token.
4626   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4627   return MatchOperand_Success;
4628 }
4629 
4630 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
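/// For example (illustrative): "msr apsr_nzcvq, r0", "msr cpsr_fc, r0", or
/// "msr spsr_fsxc, r0".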
4631 OperandMatchResultTy
4632 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4633   MCAsmParser &Parser = getParser();
4634   SMLoc S = Parser.getTok().getLoc();
4635   const AsmToken &Tok = Parser.getTok();
4636 
4637   if (Tok.is(AsmToken::Integer)) {
4638     int64_t Val = Tok.getIntVal();
4639     if (Val > 255 || Val < 0) {
4640       return MatchOperand_NoMatch;
4641     }
4642     unsigned SYSmvalue = Val & 0xFF;
4643     Parser.Lex();
4644     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4645     return MatchOperand_Success;
4646   }
4647 
4648   if (!Tok.is(AsmToken::Identifier))
4649     return MatchOperand_NoMatch;
4650   StringRef Mask = Tok.getString();
4651 
4652   if (isMClass()) {
4653     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
4654     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
4655       return MatchOperand_NoMatch;
4656 
4657     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
4658 
4659     Parser.Lex(); // Eat identifier token.
4660     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4661     return MatchOperand_Success;
4662   }
4663 
4664   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4665   size_t Start = 0, Next = Mask.find('_');
4666   StringRef Flags = "";
4667   std::string SpecReg = Mask.slice(Start, Next).lower();
4668   if (Next != StringRef::npos)
4669     Flags = Mask.slice(Next+1, Mask.size());
4670 
4671   // FlagsVal contains the complete mask:
4672   // 3-0: Mask
4673   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4674   unsigned FlagsVal = 0;
4675 
4676   if (SpecReg == "apsr") {
4677     FlagsVal = StringSwitch<unsigned>(Flags)
4678     .Case("nzcvq",  0x8) // same as CPSR_f
4679     .Case("g",      0x4) // same as CPSR_s
4680     .Case("nzcvqg", 0xc) // same as CPSR_fs
4681     .Default(~0U);
4682 
4683     if (FlagsVal == ~0U) {
4684       if (!Flags.empty())
4685         return MatchOperand_NoMatch;
4686       else
4687         FlagsVal = 8; // No flag
4688     }
4689   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4690     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4691     if (Flags == "all" || Flags == "")
4692       Flags = "fc";
4693     for (int i = 0, e = Flags.size(); i != e; ++i) {
4694       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4695       .Case("c", 1)
4696       .Case("x", 2)
4697       .Case("s", 4)
4698       .Case("f", 8)
4699       .Default(~0U);
4700 
4701       // If some specific flag is already set, it means that some letter is
4702       // present more than once; this is not acceptable.
4703       if (Flag == ~0U || (FlagsVal & Flag))
4704         return MatchOperand_NoMatch;
4705       FlagsVal |= Flag;
4706     }
4707   } else // No match for special register.
4708     return MatchOperand_NoMatch;
4709 
4710   // Special register without flags is NOT equivalent to "fc" flags.
4711   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
4712   // two lines would enable gas compatibility at the expense of breaking
4713   // round-tripping.
4714   //
4715   // if (!FlagsVal)
4716   //  FlagsVal = 0x9;
4717 
4718   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4719   if (SpecReg == "spsr")
4720     FlagsVal |= 16;
4721 
4722   Parser.Lex(); // Eat identifier token.
4723   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4724   return MatchOperand_Success;
4725 }
4726 
4727 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4728 /// use in the MRS/MSR instructions added to support virtualization.
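/// For example (illustrative): "mrs r2, lr_irq" or "msr lr_irq, r2".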
4729 OperandMatchResultTy
4730 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4731   MCAsmParser &Parser = getParser();
4732   SMLoc S = Parser.getTok().getLoc();
4733   const AsmToken &Tok = Parser.getTok();
4734   if (!Tok.is(AsmToken::Identifier))
4735     return MatchOperand_NoMatch;
4736   StringRef RegName = Tok.getString();
4737 
4738   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4739   if (!TheReg)
4740     return MatchOperand_NoMatch;
4741   unsigned Encoding = TheReg->Encoding;
4742 
4743   Parser.Lex(); // Eat identifier token.
4744   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4745   return MatchOperand_Success;
4746 }
4747 
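/// parsePKHImm - Parse the shift-amount operand of the PKHBT/PKHTB
/// instructions, i.e. the trailing "lsl #n" / "asr #n". A sketch of the
/// accepted forms (assuming the usual PKH syntax): "pkhbt r0, r1, r2, lsl #8"
/// or "pkhtb r0, r1, r2, asr #16".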
4748 OperandMatchResultTy
4749 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4750                           int High) {
4751   MCAsmParser &Parser = getParser();
4752   const AsmToken &Tok = Parser.getTok();
4753   if (Tok.isNot(AsmToken::Identifier)) {
4754     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4755     return MatchOperand_ParseFail;
4756   }
4757   StringRef ShiftName = Tok.getString();
4758   std::string LowerOp = Op.lower();
4759   std::string UpperOp = Op.upper();
4760   if (ShiftName != LowerOp && ShiftName != UpperOp) {
4761     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4762     return MatchOperand_ParseFail;
4763   }
4764   Parser.Lex(); // Eat shift type token.
4765 
4766   // There must be a '#' and a shift amount.
4767   if (Parser.getTok().isNot(AsmToken::Hash) &&
4768       Parser.getTok().isNot(AsmToken::Dollar)) {
4769     Error(Parser.getTok().getLoc(), "'#' expected");
4770     return MatchOperand_ParseFail;
4771   }
4772   Parser.Lex(); // Eat hash token.
4773 
4774   const MCExpr *ShiftAmount;
4775   SMLoc Loc = Parser.getTok().getLoc();
4776   SMLoc EndLoc;
4777   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4778     Error(Loc, "illegal expression");
4779     return MatchOperand_ParseFail;
4780   }
4781   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4782   if (!CE) {
4783     Error(Loc, "constant expression expected");
4784     return MatchOperand_ParseFail;
4785   }
4786   int Val = CE->getValue();
4787   if (Val < Low || Val > High) {
4788     Error(Loc, "immediate value out of range");
4789     return MatchOperand_ParseFail;
4790   }
4791 
4792   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4793 
4794   return MatchOperand_Success;
4795 }
4796 
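/// parseSetEndImm - Parse the endianness operand of the SETEND instruction,
/// e.g. "setend be" or "setend le".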
4797 OperandMatchResultTy
4798 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4799   MCAsmParser &Parser = getParser();
4800   const AsmToken &Tok = Parser.getTok();
4801   SMLoc S = Tok.getLoc();
4802   if (Tok.isNot(AsmToken::Identifier)) {
4803     Error(S, "'be' or 'le' operand expected");
4804     return MatchOperand_ParseFail;
4805   }
4806   int Val = StringSwitch<int>(Tok.getString().lower())
4807     .Case("be", 1)
4808     .Case("le", 0)
4809     .Default(-1);
4810   Parser.Lex(); // Eat the token.
4811 
4812   if (Val == -1) {
4813     Error(S, "'be' or 'le' operand expected");
4814     return MatchOperand_ParseFail;
4815   }
4816   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4817                                                                   getContext()),
4818                                            S, Tok.getEndLoc()));
4819   return MatchOperand_Success;
4820 }
4821 
4822 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4823 /// instructions. Legal values are:
4824 ///     lsl #n  'n' in [0,31]
4825 ///     asr #n  'n' in [1,32]
4826 ///             n == 32 encoded as n == 0.
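/// For example (illustrative): "ssat r0, #8, r1, lsl #4" or
/// "usat r0, #7, r1, asr #32".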
4827 OperandMatchResultTy
4828 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4829   MCAsmParser &Parser = getParser();
4830   const AsmToken &Tok = Parser.getTok();
4831   SMLoc S = Tok.getLoc();
4832   if (Tok.isNot(AsmToken::Identifier)) {
4833     Error(S, "shift operator 'asr' or 'lsl' expected");
4834     return MatchOperand_ParseFail;
4835   }
4836   StringRef ShiftName = Tok.getString();
4837   bool isASR;
4838   if (ShiftName == "lsl" || ShiftName == "LSL")
4839     isASR = false;
4840   else if (ShiftName == "asr" || ShiftName == "ASR")
4841     isASR = true;
4842   else {
4843     Error(S, "shift operator 'asr' or 'lsl' expected");
4844     return MatchOperand_ParseFail;
4845   }
4846   Parser.Lex(); // Eat the operator.
4847 
4848   // A '#' and a shift amount.
4849   if (Parser.getTok().isNot(AsmToken::Hash) &&
4850       Parser.getTok().isNot(AsmToken::Dollar)) {
4851     Error(Parser.getTok().getLoc(), "'#' expected");
4852     return MatchOperand_ParseFail;
4853   }
4854   Parser.Lex(); // Eat hash token.
4855   SMLoc ExLoc = Parser.getTok().getLoc();
4856 
4857   const MCExpr *ShiftAmount;
4858   SMLoc EndLoc;
4859   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4860     Error(ExLoc, "malformed shift expression");
4861     return MatchOperand_ParseFail;
4862   }
4863   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4864   if (!CE) {
4865     Error(ExLoc, "shift amount must be an immediate");
4866     return MatchOperand_ParseFail;
4867   }
4868 
4869   int64_t Val = CE->getValue();
4870   if (isASR) {
4871     // Shift amount must be in [1,32]
4872     if (Val < 1 || Val > 32) {
4873       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4874       return MatchOperand_ParseFail;
4875     }
4876     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4877     if (isThumb() && Val == 32) {
4878       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4879       return MatchOperand_ParseFail;
4880     }
4881     if (Val == 32) Val = 0;
4882   } else {
4883     // Shift amount must be in [0,31]
4884     if (Val < 0 || Val > 31) {
4885       Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4886       return MatchOperand_ParseFail;
4887     }
4888   }
4889 
4890   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4891 
4892   return MatchOperand_Success;
4893 }
4894 
4895 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4896 /// of instructions. Legal values are:
4897 ///     ror #n  'n' in {0, 8, 16, 24}
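/// For example (illustrative): "sxtb r0, r1, ror #8" or "uxtb r0, r1, ror #24".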
4898 OperandMatchResultTy
4899 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4900   MCAsmParser &Parser = getParser();
4901   const AsmToken &Tok = Parser.getTok();
4902   SMLoc S = Tok.getLoc();
4903   if (Tok.isNot(AsmToken::Identifier))
4904     return MatchOperand_NoMatch;
4905   StringRef ShiftName = Tok.getString();
4906   if (ShiftName != "ror" && ShiftName != "ROR")
4907     return MatchOperand_NoMatch;
4908   Parser.Lex(); // Eat the operator.
4909 
4910   // A '#' and a rotate amount.
4911   if (Parser.getTok().isNot(AsmToken::Hash) &&
4912       Parser.getTok().isNot(AsmToken::Dollar)) {
4913     Error(Parser.getTok().getLoc(), "'#' expected");
4914     return MatchOperand_ParseFail;
4915   }
4916   Parser.Lex(); // Eat hash token.
4917   SMLoc ExLoc = Parser.getTok().getLoc();
4918 
4919   const MCExpr *ShiftAmount;
4920   SMLoc EndLoc;
4921   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4922     Error(ExLoc, "malformed rotate expression");
4923     return MatchOperand_ParseFail;
4924   }
4925   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4926   if (!CE) {
4927     Error(ExLoc, "rotate amount must be an immediate");
4928     return MatchOperand_ParseFail;
4929   }
4930 
4931   int64_t Val = CE->getValue();
4932   // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension
4933   // (normally, zero is represented in asm by omitting the rotate operand
4934   // entirely).
4935   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4936     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4937     return MatchOperand_ParseFail;
4938   }
4939 
4940   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4941 
4942   return MatchOperand_Success;
4943 }
4944 
4945 OperandMatchResultTy
4946 ARMAsmParser::parseModImm(OperandVector &Operands) {
4947   MCAsmParser &Parser = getParser();
4948   MCAsmLexer &Lexer = getLexer();
4949   int64_t Imm1, Imm2;
4950 
4951   SMLoc S = Parser.getTok().getLoc();
4952 
4953   // 1) A mod_imm operand can appear in the place of a register name:
4954   //   add r0, #mod_imm
4955   //   add r0, r0, #mod_imm
4956   // to correctly handle the latter, we bail out as soon as we see an
4957   // identifier.
4958   //
4959   // 2) Similarly, we do not want to parse into complex operands:
4960   //   mov r0, #mod_imm
4961   //   mov r0, :lower16:(_foo)
4962   if (Parser.getTok().is(AsmToken::Identifier) ||
4963       Parser.getTok().is(AsmToken::Colon))
4964     return MatchOperand_NoMatch;
4965 
4966   // Hash (dollar) is optional as per the ARMARM
4967   if (Parser.getTok().is(AsmToken::Hash) ||
4968       Parser.getTok().is(AsmToken::Dollar)) {
4969     // Avoid parsing into complex operands (#:)
4970     if (Lexer.peekTok().is(AsmToken::Colon))
4971       return MatchOperand_NoMatch;
4972 
4973     // Eat the hash (dollar)
4974     Parser.Lex();
4975   }
4976 
4977   SMLoc Sx1, Ex1;
4978   Sx1 = Parser.getTok().getLoc();
4979   const MCExpr *Imm1Exp;
4980   if (getParser().parseExpression(Imm1Exp, Ex1)) {
4981     Error(Sx1, "malformed expression");
4982     return MatchOperand_ParseFail;
4983   }
4984 
4985   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4986 
4987   if (CE) {
4988     // Immediate must fit within 32-bits
4989     Imm1 = CE->getValue();
4990     int Enc = ARM_AM::getSOImmVal(Imm1);
4991     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4992       // We have a match!
4993       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4994                                                   (Enc & 0xF00) >> 7,
4995                                                   Sx1, Ex1));
4996       return MatchOperand_Success;
4997     }
4998 
4999     // We have parsed an immediate which is not for us, fallback to a plain
5000     // immediate. This can happen for instruction aliases. For example,
5001     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
5002     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
5003     // instruction with a mod_imm operand. The alias is defined such that the
5004     // parser method is shared, that's why we have to do this here.
5005     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
5006       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5007       return MatchOperand_Success;
5008     }
5009   } else {
5010     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
5011     // MCFixup). Fallback to a plain immediate.
5012     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
5013     return MatchOperand_Success;
5014   }
5015 
5016   // From this point onward, we expect the input to be a (#bits, #rot) pair
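  // e.g. "mov r0, #16, #20" (illustrative): the 8-bit value 16 rotated right
  // by 20 bits.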
5017   if (Parser.getTok().isNot(AsmToken::Comma)) {
5018     Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
5019     return MatchOperand_ParseFail;
5020   }
5021 
5022   if (Imm1 & ~0xFF) {
5023     Error(Sx1, "immediate operand must be a number in the range [0, 255]");
5024     return MatchOperand_ParseFail;
5025   }
5026 
5027   // Eat the comma
5028   Parser.Lex();
5029 
5030   // Repeat for #rot
5031   SMLoc Sx2, Ex2;
5032   Sx2 = Parser.getTok().getLoc();
5033 
5034   // Eat the optional hash (dollar)
5035   if (Parser.getTok().is(AsmToken::Hash) ||
5036       Parser.getTok().is(AsmToken::Dollar))
5037     Parser.Lex();
5038 
5039   const MCExpr *Imm2Exp;
5040   if (getParser().parseExpression(Imm2Exp, Ex2)) {
5041     Error(Sx2, "malformed expression");
5042     return MatchOperand_ParseFail;
5043   }
5044 
5045   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
5046 
5047   if (CE) {
5048     Imm2 = CE->getValue();
5049     if (!(Imm2 & ~0x1E)) {
5050       // We have a match!
5051       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
5052       return MatchOperand_Success;
5053     }
5054     Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
5055     return MatchOperand_ParseFail;
5056   } else {
5057     Error(Sx2, "constant expression expected");
5058     return MatchOperand_ParseFail;
5059   }
5060 }
5061 
5062 OperandMatchResultTy
5063 ARMAsmParser::parseBitfield(OperandVector &Operands) {
5064   MCAsmParser &Parser = getParser();
5065   SMLoc S = Parser.getTok().getLoc();
5066   // The bitfield descriptor is really two operands, the LSB and the width.
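  // e.g. "bfc r0, #4, #8" (illustrative): lsb = 4, width = 8.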
5067   if (Parser.getTok().isNot(AsmToken::Hash) &&
5068       Parser.getTok().isNot(AsmToken::Dollar)) {
5069     Error(Parser.getTok().getLoc(), "'#' expected");
5070     return MatchOperand_ParseFail;
5071   }
5072   Parser.Lex(); // Eat hash token.
5073 
5074   const MCExpr *LSBExpr;
5075   SMLoc E = Parser.getTok().getLoc();
5076   if (getParser().parseExpression(LSBExpr)) {
5077     Error(E, "malformed immediate expression");
5078     return MatchOperand_ParseFail;
5079   }
5080   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
5081   if (!CE) {
5082     Error(E, "'lsb' operand must be an immediate");
5083     return MatchOperand_ParseFail;
5084   }
5085 
5086   int64_t LSB = CE->getValue();
5087   // The LSB must be in the range [0,31]
5088   if (LSB < 0 || LSB > 31) {
5089     Error(E, "'lsb' operand must be in the range [0,31]");
5090     return MatchOperand_ParseFail;
5091   }
5092   E = Parser.getTok().getLoc();
5093 
5094   // Expect another immediate operand.
5095   if (Parser.getTok().isNot(AsmToken::Comma)) {
5096     Error(Parser.getTok().getLoc(), "too few operands");
5097     return MatchOperand_ParseFail;
5098   }
5099   Parser.Lex(); // Eat comma token.
5100   if (Parser.getTok().isNot(AsmToken::Hash) &&
5101       Parser.getTok().isNot(AsmToken::Dollar)) {
5102     Error(Parser.getTok().getLoc(), "'#' expected");
5103     return MatchOperand_ParseFail;
5104   }
5105   Parser.Lex(); // Eat hash token.
5106 
5107   const MCExpr *WidthExpr;
5108   SMLoc EndLoc;
5109   if (getParser().parseExpression(WidthExpr, EndLoc)) {
5110     Error(E, "malformed immediate expression");
5111     return MatchOperand_ParseFail;
5112   }
5113   CE = dyn_cast<MCConstantExpr>(WidthExpr);
5114   if (!CE) {
5115     Error(E, "'width' operand must be an immediate");
5116     return MatchOperand_ParseFail;
5117   }
5118 
5119   int64_t Width = CE->getValue();
5120   // The width must be in the range [1,32-lsb]
5121   if (Width < 1 || Width > 32 - LSB) {
5122     Error(E, "'width' operand must be in the range [1,32-lsb]");
5123     return MatchOperand_ParseFail;
5124   }
5125 
5126   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
5127 
5128   return MatchOperand_Success;
5129 }
5130 
5131 OperandMatchResultTy
5132 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
5133   // Check for a post-index addressing register operand. Specifically:
5134   // postidx_reg := '+' register {, shift}
5135   //              | '-' register {, shift}
5136   //              | register {, shift}
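  // For example (illustrative), the post-indexed forms:
  //   ldr r0, [r1], r2
  //   ldr r0, [r1], -r2, lsl #2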
5137 
5138   // This method must return MatchOperand_NoMatch without consuming any tokens
5139   // in the case where there is no match, as other alternatives take other
5140   // parse methods.
5141   MCAsmParser &Parser = getParser();
5142   AsmToken Tok = Parser.getTok();
5143   SMLoc S = Tok.getLoc();
5144   bool haveEaten = false;
5145   bool isAdd = true;
5146   if (Tok.is(AsmToken::Plus)) {
5147     Parser.Lex(); // Eat the '+' token.
5148     haveEaten = true;
5149   } else if (Tok.is(AsmToken::Minus)) {
5150     Parser.Lex(); // Eat the '-' token.
5151     isAdd = false;
5152     haveEaten = true;
5153   }
5154 
5155   SMLoc E = Parser.getTok().getEndLoc();
5156   int Reg = tryParseRegister();
5157   if (Reg == -1) {
5158     if (!haveEaten)
5159       return MatchOperand_NoMatch;
5160     Error(Parser.getTok().getLoc(), "register expected");
5161     return MatchOperand_ParseFail;
5162   }
5163 
5164   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
5165   unsigned ShiftImm = 0;
5166   if (Parser.getTok().is(AsmToken::Comma)) {
5167     Parser.Lex(); // Eat the ','.
5168     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
5169       return MatchOperand_ParseFail;
5170 
5171     // FIXME: Only approximates end...may include intervening whitespace.
5172     E = Parser.getTok().getLoc();
5173   }
5174 
5175   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
5176                                                   ShiftImm, S, E));
5177 
5178   return MatchOperand_Success;
5179 }
5180 
5181 OperandMatchResultTy
5182 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
5183   // Check for a post-index addressing register operand. Specifically:
5184   // am3offset := '+' register
5185   //              | '-' register
5186   //              | register
5187   //              | # imm
5188   //              | # + imm
5189   //              | # - imm
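  // For example (illustrative), the addrmode3 post-indexed forms:
  //   ldrh r0, [r1], r2
  //   ldrh r0, [r1], #-4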
5190 
5191   // This method must return MatchOperand_NoMatch without consuming any tokens
5192   // in the case where there is no match, as other alternatives take other
5193   // parse methods.
5194   MCAsmParser &Parser = getParser();
5195   AsmToken Tok = Parser.getTok();
5196   SMLoc S = Tok.getLoc();
5197 
5198   // Do immediates first, as we always parse those if we have a '#'.
5199   if (Parser.getTok().is(AsmToken::Hash) ||
5200       Parser.getTok().is(AsmToken::Dollar)) {
5201     Parser.Lex(); // Eat '#' or '$'.
5202     // Explicitly look for a '-', as we need to encode negative zero
5203     // differently.
5204     bool isNegative = Parser.getTok().is(AsmToken::Minus);
5205     const MCExpr *Offset;
5206     SMLoc E;
5207     if (getParser().parseExpression(Offset, E))
5208       return MatchOperand_ParseFail;
5209     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5210     if (!CE) {
5211       Error(S, "constant expression expected");
5212       return MatchOperand_ParseFail;
5213     }
5214     // Negative zero is encoded as the flag value
5215     // std::numeric_limits<int32_t>::min().
5216     int32_t Val = CE->getValue();
5217     if (isNegative && Val == 0)
5218       Val = std::numeric_limits<int32_t>::min();
5219 
5220     Operands.push_back(
5221       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
5222 
5223     return MatchOperand_Success;
5224   }
5225 
5226   bool haveEaten = false;
5227   bool isAdd = true;
5228   if (Tok.is(AsmToken::Plus)) {
5229     Parser.Lex(); // Eat the '+' token.
5230     haveEaten = true;
5231   } else if (Tok.is(AsmToken::Minus)) {
5232     Parser.Lex(); // Eat the '-' token.
5233     isAdd = false;
5234     haveEaten = true;
5235   }
5236 
5237   Tok = Parser.getTok();
5238   int Reg = tryParseRegister();
5239   if (Reg == -1) {
5240     if (!haveEaten)
5241       return MatchOperand_NoMatch;
5242     Error(Tok.getLoc(), "register expected");
5243     return MatchOperand_ParseFail;
5244   }
5245 
5246   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
5247                                                   0, S, Tok.getEndLoc()));
5248 
5249   return MatchOperand_Success;
5250 }
5251 
5252 /// Convert parsed operands to MCInst.  Needed here because this instruction
5253 /// only has two register operands, but multiplication is commutative so
5254 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
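/// For example (illustrative), both "mul r0, r1, r0" and "mul r0, r0, r1"
/// map to the same two-register encoding.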
5255 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
5256                                     const OperandVector &Operands) {
5257   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
5258   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
5259   // If we have a three-operand form, make sure to set Rn to be the operand
5260   // that isn't the same as Rd.
5261   unsigned RegOp = 4;
5262   if (Operands.size() == 6 &&
5263       ((ARMOperand &)*Operands[4]).getReg() ==
5264           ((ARMOperand &)*Operands[3]).getReg())
5265     RegOp = 5;
5266   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
5267   Inst.addOperand(Inst.getOperand(0));
5268   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5269 }
5270 
5271 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5272                                     const OperandVector &Operands) {
5273   int CondOp = -1, ImmOp = -1;
5274   switch(Inst.getOpcode()) {
5275     case ARM::tB:
5276     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
5277 
5278     case ARM::t2B:
5279     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5280 
5281     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5282   }
5283   // First decide whether or not the branch should be conditional
5284   // by looking at its location relative to an IT block.
5285   if(inITBlock()) {
5286     // Inside an IT block we cannot have any conditional branches. Any
5287     // such instruction needs to be converted to unconditional form.
5288     switch(Inst.getOpcode()) {
5289       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5290       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5291     }
5292   } else {
5293     // outside IT blocks we can only have unconditional branches with AL
5294     // condition code or conditional branches with non-AL condition code
5295     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5296     switch(Inst.getOpcode()) {
5297       case ARM::tB:
5298       case ARM::tBcc:
5299         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5300         break;
5301       case ARM::t2B:
5302       case ARM::t2Bcc:
5303         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5304         break;
5305     }
5306   }
5307 
5308   // now decide on encoding size based on branch target range
5309   switch(Inst.getOpcode()) {
5310     // classify tB as either t2B or t1B based on range of immediate operand
5311     case ARM::tB: {
5312       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5313       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5314         Inst.setOpcode(ARM::t2B);
5315       break;
5316     }
5317     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5318     case ARM::tBcc: {
5319       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5320       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5321         Inst.setOpcode(ARM::t2Bcc);
5322       break;
5323     }
5324   }
5325   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5326   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5327 }
5328 
5329 /// Parse an ARM memory expression. Return false if successful, or true if an
5330 /// error was reported. The first token must be a '[' when called.
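/// A sketch of the forms handled below (illustrative, not exhaustive):
///   [r0]              @ base register only
///   [r0, #4]          @ immediate offset
///   [r0, r1, lsl #2]  @ register offset with shift
///   [r0:128]          @ alignment specifier
/// each optionally followed by a '!' writeback marker.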
5331 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5332   MCAsmParser &Parser = getParser();
5333   SMLoc S, E;
5334   if (Parser.getTok().isNot(AsmToken::LBrac))
5335     return TokError("Token is not a Left Bracket");
5336   S = Parser.getTok().getLoc();
5337   Parser.Lex(); // Eat left bracket token.
5338 
5339   const AsmToken &BaseRegTok = Parser.getTok();
5340   int BaseRegNum = tryParseRegister();
5341   if (BaseRegNum == -1)
5342     return Error(BaseRegTok.getLoc(), "register expected");
5343 
5344   // The next token must either be a comma, a colon or a closing bracket.
5345   const AsmToken &Tok = Parser.getTok();
5346   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5347       !Tok.is(AsmToken::RBrac))
5348     return Error(Tok.getLoc(), "malformed memory operand");
5349 
5350   if (Tok.is(AsmToken::RBrac)) {
5351     E = Tok.getEndLoc();
5352     Parser.Lex(); // Eat right bracket token.
5353 
5354     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5355                                              ARM_AM::no_shift, 0, 0, false,
5356                                              S, E));
5357 
5358     // If there's a pre-indexing writeback marker, '!', just add it as a token
5359     // operand. It's rather odd, but syntactically valid.
5360     if (Parser.getTok().is(AsmToken::Exclaim)) {
5361       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5362       Parser.Lex(); // Eat the '!'.
5363     }
5364 
5365     return false;
5366   }
5367 
5368   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5369          "Lost colon or comma in memory operand?!");
5370   if (Tok.is(AsmToken::Comma)) {
5371     Parser.Lex(); // Eat the comma.
5372   }
5373 
5374   // If we have a ':', it's an alignment specifier.
5375   if (Parser.getTok().is(AsmToken::Colon)) {
5376     Parser.Lex(); // Eat the ':'.
5377     E = Parser.getTok().getLoc();
5378     SMLoc AlignmentLoc = Tok.getLoc();
5379 
5380     const MCExpr *Expr;
5381     if (getParser().parseExpression(Expr))
5382      return true;
5383 
5384     // The expression has to be a constant. Memory references with relocations
5385     // don't come through here, as they use the <label> forms of the relevant
5386     // instructions.
5387     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5388     if (!CE)
5389       return Error (E, "constant expression expected");
5390 
5391     unsigned Align = 0;
5392     switch (CE->getValue()) {
5393     default:
5394       return Error(E,
5395                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5396     case 16:  Align = 2; break;
5397     case 32:  Align = 4; break;
5398     case 64:  Align = 8; break;
5399     case 128: Align = 16; break;
5400     case 256: Align = 32; break;
5401     }
5402 
5403     // Now we should have the closing ']'
5404     if (Parser.getTok().isNot(AsmToken::RBrac))
5405       return Error(Parser.getTok().getLoc(), "']' expected");
5406     E = Parser.getTok().getEndLoc();
5407     Parser.Lex(); // Eat right bracket token.
5408 
5409     // Don't worry about range checking the value here. That's handled by
5410     // the is*() predicates.
5411     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5412                                              ARM_AM::no_shift, 0, Align,
5413                                              false, S, E, AlignmentLoc));
5414 
5415     // If there's a pre-indexing writeback marker, '!', just add it as a token
5416     // operand.
5417     if (Parser.getTok().is(AsmToken::Exclaim)) {
5418       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5419       Parser.Lex(); // Eat the '!'.
5420     }
5421 
5422     return false;
5423   }
5424 
5425   // If we have a '#', it's an immediate offset, else assume it's a register
5426   // offset. Be friendly and also accept a plain integer (without a leading
5427   // hash) for gas compatibility.
5428   if (Parser.getTok().is(AsmToken::Hash) ||
5429       Parser.getTok().is(AsmToken::Dollar) ||
5430       Parser.getTok().is(AsmToken::Integer)) {
5431     if (Parser.getTok().isNot(AsmToken::Integer))
5432       Parser.Lex(); // Eat '#' or '$'.
5433     E = Parser.getTok().getLoc();
5434 
5435     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5436     const MCExpr *Offset;
5437     if (getParser().parseExpression(Offset))
5438      return true;
5439 
5440     // The expression has to be a constant. Memory references with relocations
5441     // don't come through here, as they use the <label> forms of the relevant
5442     // instructions.
5443     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5444     if (!CE)
5445       return Error (E, "constant expression expected");
5446 
5447     // If the constant was #-0, represent it as
5448     // std::numeric_limits<int32_t>::min().
5449     int32_t Val = CE->getValue();
5450     if (isNegative && Val == 0)
5451       CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5452                                   getContext());
5453 
5454     // Now we should have the closing ']'
5455     if (Parser.getTok().isNot(AsmToken::RBrac))
5456       return Error(Parser.getTok().getLoc(), "']' expected");
5457     E = Parser.getTok().getEndLoc();
5458     Parser.Lex(); // Eat right bracket token.
5459 
5460     // Don't worry about range checking the value here. That's handled by
5461     // the is*() predicates.
5462     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
5463                                              ARM_AM::no_shift, 0, 0,
5464                                              false, S, E));
5465 
5466     // If there's a pre-indexing writeback marker, '!', just add it as a token
5467     // operand.
5468     if (Parser.getTok().is(AsmToken::Exclaim)) {
5469       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5470       Parser.Lex(); // Eat the '!'.
5471     }
5472 
5473     return false;
5474   }
5475 
5476   // The register offset is optionally preceded by a '+' or '-'
5477   bool isNegative = false;
5478   if (Parser.getTok().is(AsmToken::Minus)) {
5479     isNegative = true;
5480     Parser.Lex(); // Eat the '-'.
5481   } else if (Parser.getTok().is(AsmToken::Plus)) {
5482     // Nothing to do.
5483     Parser.Lex(); // Eat the '+'.
5484   }
5485 
5486   E = Parser.getTok().getLoc();
5487   int OffsetRegNum = tryParseRegister();
5488   if (OffsetRegNum == -1)
5489     return Error(E, "register expected");
5490 
5491   // If there's a shift operator, handle it.
5492   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5493   unsigned ShiftImm = 0;
5494   if (Parser.getTok().is(AsmToken::Comma)) {
5495     Parser.Lex(); // Eat the ','.
5496     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5497       return true;
5498   }
5499 
5500   // Now we should have the closing ']'
5501   if (Parser.getTok().isNot(AsmToken::RBrac))
5502     return Error(Parser.getTok().getLoc(), "']' expected");
5503   E = Parser.getTok().getEndLoc();
5504   Parser.Lex(); // Eat right bracket token.
5505 
5506   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5507                                            ShiftType, ShiftImm, 0, isNegative,
5508                                            S, E));
5509 
5510   // If there's a pre-indexing writeback marker, '!', just add it as a token
5511   // operand.
5512   if (Parser.getTok().is(AsmToken::Exclaim)) {
5513     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5514     Parser.Lex(); // Eat the '!'.
5515   }
5516 
5517   return false;
5518 }
5519 
5520 /// parseMemRegOffsetShift - one of these two:
5521 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5522 ///   rrx
5523 /// Return false on success, or true if an error was reported.
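/// For example (illustrative): "lsl #2", "asr #32", or "rrx".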
5524 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5525                                           unsigned &Amount) {
5526   MCAsmParser &Parser = getParser();
5527   SMLoc Loc = Parser.getTok().getLoc();
5528   const AsmToken &Tok = Parser.getTok();
5529   if (Tok.isNot(AsmToken::Identifier))
5530     return Error(Loc, "illegal shift operator");
5531   StringRef ShiftName = Tok.getString();
5532   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5533       ShiftName == "asl" || ShiftName == "ASL")
5534     St = ARM_AM::lsl;
5535   else if (ShiftName == "lsr" || ShiftName == "LSR")
5536     St = ARM_AM::lsr;
5537   else if (ShiftName == "asr" || ShiftName == "ASR")
5538     St = ARM_AM::asr;
5539   else if (ShiftName == "ror" || ShiftName == "ROR")
5540     St = ARM_AM::ror;
5541   else if (ShiftName == "rrx" || ShiftName == "RRX")
5542     St = ARM_AM::rrx;
5543   else
5544     return Error(Loc, "illegal shift operator");
5545   Parser.Lex(); // Eat shift type token.
5546 
5547   // rrx stands alone.
5548   Amount = 0;
5549   if (St != ARM_AM::rrx) {
5550     Loc = Parser.getTok().getLoc();
5551     // A '#' and a shift amount.
5552     const AsmToken &HashTok = Parser.getTok();
5553     if (HashTok.isNot(AsmToken::Hash) &&
5554         HashTok.isNot(AsmToken::Dollar))
5555       return Error(HashTok.getLoc(), "'#' expected");
5556     Parser.Lex(); // Eat hash token.
5557 
5558     const MCExpr *Expr;
5559     if (getParser().parseExpression(Expr))
5560       return true;
5561     // Range check the immediate.
5562     // lsl, ror: 0 <= imm <= 31
5563     // lsr, asr: 0 <= imm <= 32
5564     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5565     if (!CE)
5566       return Error(Loc, "shift amount must be an immediate");
5567     int64_t Imm = CE->getValue();
5568     if (Imm < 0 ||
5569         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5570         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5571       return Error(Loc, "immediate shift value out of range");
5572     // If <ShiftTy> #0, canonicalize it to lsl #0 (equivalent to no shift).
5573     if (Imm == 0)
5574       St = ARM_AM::lsl;
5575     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5576     if (Imm == 32)
5577       Imm = 0;
5578     Amount = Imm;
5579   }
5580 
5581   return false;
5582 }
5583 
5584 /// parseFPImm - A floating point immediate expression operand.
5585 OperandMatchResultTy
5586 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5587   MCAsmParser &Parser = getParser();
5588   // Anything that can accept a floating point constant as an operand
5589   // needs to go through here, as the regular parseExpression is
5590   // integer only.
5591   //
5592   // This routine still creates a generic Immediate operand, containing
5593   // a bitcast of the 64-bit floating point value. The various operands
5594   // that accept floats can check whether the value is valid for them
5595   // via the standard is*() predicates.
5596 
5597   SMLoc S = Parser.getTok().getLoc();
5598 
5599   if (Parser.getTok().isNot(AsmToken::Hash) &&
5600       Parser.getTok().isNot(AsmToken::Dollar))
5601     return MatchOperand_NoMatch;
5602 
5603   // Disambiguate the VMOV forms that can accept an FP immediate.
5604   // vmov.f32 <sreg>, #imm
5605   // vmov.f64 <dreg>, #imm
5606   // vmov.f32 <dreg>, #imm  @ vector f32x2
5607   // vmov.f32 <qreg>, #imm  @ vector f32x4
5608   //
5609   // There are also the NEON VMOV instructions which expect an
5610   // integer constant. Make sure we don't try to parse an FPImm
5611   // for these:
5612   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
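  // For example (illustrative): "vmov.f32 s0, #1.0", "vmov.f64 d0, #-2.0", or
  // "fconsts s0, #112" (a raw 8-bit encoded value).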
5613   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5614   bool isVmovf = TyOp.isToken() &&
5615                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5616                   TyOp.getToken() == ".f16");
5617   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5618   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5619                                          Mnemonic.getToken() == "fconsts");
5620   if (!(isVmovf || isFconst))
5621     return MatchOperand_NoMatch;
5622 
5623   Parser.Lex(); // Eat '#' or '$'.
5624 
5625   // Handle negation, as that still comes through as a separate token.
5626   bool isNegative = false;
5627   if (Parser.getTok().is(AsmToken::Minus)) {
5628     isNegative = true;
5629     Parser.Lex();
5630   }
5631   const AsmToken &Tok = Parser.getTok();
5632   SMLoc Loc = Tok.getLoc();
5633   if (Tok.is(AsmToken::Real) && isVmovf) {
5634     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5635     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5636     // If we had a '-' in front, toggle the sign bit.
5637     IntVal ^= (uint64_t)isNegative << 31;
5638     Parser.Lex(); // Eat the token.
5639     Operands.push_back(ARMOperand::CreateImm(
5640           MCConstantExpr::create(IntVal, getContext()),
5641           S, Parser.getTok().getLoc()));
5642     return MatchOperand_Success;
5643   }
5644   // Also handle plain integers. Instructions which allow floating point
5645   // immediates also allow a raw encoded 8-bit value.
5646   if (Tok.is(AsmToken::Integer) && isFconst) {
5647     int64_t Val = Tok.getIntVal();
5648     Parser.Lex(); // Eat the token.
5649     if (Val > 255 || Val < 0) {
5650       Error(Loc, "encoded floating point value out of range");
5651       return MatchOperand_ParseFail;
5652     }
5653     float RealVal = ARM_AM::getFPImmFloat(Val);
5654     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5655 
5656     Operands.push_back(ARMOperand::CreateImm(
5657         MCConstantExpr::create(Val, getContext()), S,
5658         Parser.getTok().getLoc()));
5659     return MatchOperand_Success;
5660   }
5661 
5662   Error(Loc, "invalid floating point immediate");
5663   return MatchOperand_ParseFail;
5664 }
5665 
5666 /// Parse an ARM instruction operand.  For now this parses the operand regardless
5667 /// of the mnemonic.
5668 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5669   MCAsmParser &Parser = getParser();
5670   SMLoc S, E;
5671 
5672   // Check if the current operand has a custom associated parser, if so, try to
5673   // custom parse the operand, or fallback to the general approach.
5674   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5675   if (ResTy == MatchOperand_Success)
5676     return false;
5677   // If there wasn't a custom match, try the generic matcher below. Otherwise,
5678   // there was a match, but an error occurred, in which case, just return that
5679   // the operand parsing failed.
5680   if (ResTy == MatchOperand_ParseFail)
5681     return true;
5682 
5683   switch (getLexer().getKind()) {
5684   default:
5685     Error(Parser.getTok().getLoc(), "unexpected token in operand");
5686     return true;
5687   case AsmToken::Identifier: {
5688     // If we've seen a branch mnemonic, the next operand must be a label.  This
5689     // is true even if the label is a register name.  So "b r1" means branch to
5690     // label "r1".
5691     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5692     if (!ExpectLabel) {
5693       if (!tryParseRegisterWithWriteBack(Operands))
5694         return false;
5695       int Res = tryParseShiftRegister(Operands);
5696       if (Res == 0) // success
5697         return false;
5698       else if (Res == -1) // irrecoverable error
5699         return true;
5700       // If this is VMRS, check for the apsr_nzcv operand.
5701       if (Mnemonic == "vmrs" &&
5702           Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5703         S = Parser.getTok().getLoc();
5704         Parser.Lex();
5705         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5706         return false;
5707       }
5708     }
5709 
5710     // Fall though for the Identifier case that is not a register or a
5711     // special name.
5712     LLVM_FALLTHROUGH;
5713   }
5714   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
5715   case AsmToken::Integer: // things like 1f and 2b as branch targets
5716   case AsmToken::String:  // quoted label names.
5717   case AsmToken::Dot: {   // . as a branch target
5718     // This was not a register so parse other operands that start with an
5719     // identifier (like labels) as expressions and create them as immediates.
5720     const MCExpr *IdVal;
5721     S = Parser.getTok().getLoc();
5722     if (getParser().parseExpression(IdVal))
5723       return true;
5724     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5725     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5726     return false;
5727   }
5728   case AsmToken::LBrac:
5729     return parseMemory(Operands);
5730   case AsmToken::LCurly:
5731     return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
5732   case AsmToken::Dollar:
5733   case AsmToken::Hash:
5734     // #42 -> immediate.
5735     S = Parser.getTok().getLoc();
5736     Parser.Lex();
5737 
5738     if (Parser.getTok().isNot(AsmToken::Colon)) {
5739       bool isNegative = Parser.getTok().is(AsmToken::Minus);
5740       const MCExpr *ImmVal;
5741       if (getParser().parseExpression(ImmVal))
5742         return true;
5743       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5744       if (CE) {
5745         int32_t Val = CE->getValue();
5746         if (isNegative && Val == 0)
5747           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5748                                           getContext());
5749       }
5750       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5751       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5752 
5753       // There can be a trailing '!' on operands that we want as a separate
5754       // '!' Token operand. Handle that here. For example, the compatibility
5755       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5756       if (Parser.getTok().is(AsmToken::Exclaim)) {
5757         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5758                                                    Parser.getTok().getLoc()));
5759         Parser.Lex(); // Eat exclaim token
5760       }
5761       return false;
5762     }
    // With a ':' after the '#', it's just like a plain ':'.
5764     LLVM_FALLTHROUGH;
5765 
5766   case AsmToken::Colon: {
5767     S = Parser.getTok().getLoc();
5768     // ":lower16:" and ":upper16:" expression prefixes
5769     // FIXME: Check it's an expression prefix,
5770     // e.g. (FOO - :lower16:BAR) isn't legal.
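    // Typical usage: "movw r0, :lower16:sym" / "movt r0, :upper16:sym".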
5771     ARMMCExpr::VariantKind RefKind;
5772     if (parsePrefix(RefKind))
5773       return true;
5774 
5775     const MCExpr *SubExprVal;
5776     if (getParser().parseExpression(SubExprVal))
5777       return true;
5778 
5779     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5780                                               getContext());
5781     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5782     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5783     return false;
5784   }
5785   case AsmToken::Equal: {
5786     S = Parser.getTok().getLoc();
5787     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5788       return Error(S, "unexpected token in operand");
5789     Parser.Lex(); // Eat '='
5790     const MCExpr *SubExprVal;
5791     if (getParser().parseExpression(SubExprVal))
5792       return true;
5793     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5794 
5795     // execute-only: we assume that assembly programmers know what they are
5796     // doing and allow literal pool creation here
5797     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
5798     return false;
5799   }
5800   }
5801 }
5802 
// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
//  :lower16: and :upper16:.
5805 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5806   MCAsmParser &Parser = getParser();
5807   RefKind = ARMMCExpr::VK_ARM_None;
5808 
5809   // consume an optional '#' (GNU compatibility)
5810   if (getLexer().is(AsmToken::Hash))
5811     Parser.Lex();
5812 
5813   // :lower16: and :upper16: modifiers
5814   assert(getLexer().is(AsmToken::Colon) && "expected a :");
5815   Parser.Lex(); // Eat ':'
5816 
5817   if (getLexer().isNot(AsmToken::Identifier)) {
5818     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5819     return true;
5820   }
5821 
5822   enum {
5823     COFF = (1 << MCObjectFileInfo::IsCOFF),
5824     ELF = (1 << MCObjectFileInfo::IsELF),
5825     MACHO = (1 << MCObjectFileInfo::IsMachO),
5826     WASM = (1 << MCObjectFileInfo::IsWasm),
5827   };
5828   static const struct PrefixEntry {
5829     const char *Spelling;
5830     ARMMCExpr::VariantKind VariantKind;
5831     uint8_t SupportedFormats;
5832   } PrefixEntries[] = {
5833     { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5834     { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5835   };
5836 
5837   StringRef IDVal = Parser.getTok().getIdentifier();
5838 
5839   const auto &Prefix =
5840       std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5841                    [&IDVal](const PrefixEntry &PE) {
5842                       return PE.Spelling == IDVal;
5843                    });
5844   if (Prefix == std::end(PrefixEntries)) {
5845     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5846     return true;
5847   }
5848 
5849   uint8_t CurrentFormat;
5850   switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5851   case MCObjectFileInfo::IsMachO:
5852     CurrentFormat = MACHO;
5853     break;
5854   case MCObjectFileInfo::IsELF:
5855     CurrentFormat = ELF;
5856     break;
5857   case MCObjectFileInfo::IsCOFF:
5858     CurrentFormat = COFF;
5859     break;
5860   case MCObjectFileInfo::IsWasm:
5861     CurrentFormat = WASM;
5862     break;
5863   case MCObjectFileInfo::IsXCOFF:
5864     llvm_unreachable("unexpected object format");
5865     break;
5866   }
5867 
5868   if (~Prefix->SupportedFormats & CurrentFormat) {
5869     Error(Parser.getTok().getLoc(),
5870           "cannot represent relocation in the current file format");
5871     return true;
5872   }
5873 
5874   RefKind = Prefix->VariantKind;
5875   Parser.Lex();
5876 
5877   if (getLexer().isNot(AsmToken::Colon)) {
5878     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5879     return true;
5880   }
5881   Parser.Lex(); // Eat the last ':'
5882 
5883   return false;
5884 }
5885 
5886 /// Given a mnemonic, split out possible predication code and carry
5887 /// setting letters to form a canonical mnemonic and flags.
5888 //
5889 // FIXME: Would be nice to autogen this.
5890 // FIXME: This is a bit of a maze of special cases.
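// For example, "addseq" is split into the canonical mnemonic "add" plus the
// carry-setting 's' flag and the EQ predication code.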
5891 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5892                                       StringRef ExtraToken,
5893                                       unsigned &PredicationCode,
5894                                       unsigned &VPTPredicationCode,
5895                                       bool &CarrySetting,
5896                                       unsigned &ProcessorIMod,
5897                                       StringRef &ITMask) {
5898   PredicationCode = ARMCC::AL;
5899   VPTPredicationCode = ARMVCC::None;
5900   CarrySetting = false;
5901   ProcessorIMod = 0;
5902 
5903   // Ignore some mnemonics we know aren't predicated forms.
5904   //
5905   // FIXME: Would be nice to autogen this.
5906   if ((Mnemonic == "movs" && isThumb()) ||
5907       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
5908       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
5909       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
5910       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
5911       Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
5912       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
5913       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
5914       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5915       Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5916       Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
5917       Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5918       Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5919       Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5920       Mnemonic == "bxns"  || Mnemonic == "blxns" ||
5921       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5922       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
5923       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
5924       Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
5925       Mnemonic == "csel" || Mnemonic == "csinc" ||
5926       Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
5927       Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
5928       Mnemonic == "csetm")
5929     return Mnemonic;
5930 
  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated forms but do end in a carry-setting 's' and so weren't caught
  // above.
5933   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5934       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5935       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5936       Mnemonic != "sbcs" && Mnemonic != "rscs") {
5937     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
5938     if (CC != ~0U) {
5939       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5940       PredicationCode = CC;
5941     }
5942   }
5943 
5944   // Next, determine if we have a carry setting bit. We explicitly ignore all
5945   // the instructions we know end in 's'.
5946   if (Mnemonic.endswith("s") &&
5947       !(Mnemonic == "cps" || Mnemonic == "mls" ||
5948         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5949         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5950         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5951         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5952         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5953         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5954         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5955         Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5956         Mnemonic == "bxns" || Mnemonic == "blxns" ||
5957         (Mnemonic == "movs" && isThumb()))) {
5958     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5959     CarrySetting = true;
5960   }
5961 
5962   // The "cps" instruction can have a interrupt mode operand which is glued into
5963   // the mnemonic. Check if this is the case, split it and parse the imod op
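  // For example, "cpsie" is split into "cps" with ProcessorIMod set to IE.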
5964   if (Mnemonic.startswith("cps")) {
5965     // Split out any imod code.
5966     unsigned IMod =
5967       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5968       .Case("ie", ARM_PROC::IE)
5969       .Case("id", ARM_PROC::ID)
5970       .Default(~0U);
5971     if (IMod != ~0U) {
5972       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5973       ProcessorIMod = IMod;
5974     }
5975   }
5976 
5977   if (isMnemonicVPTPredicable(Mnemonic, ExtraToken)) {
5978     unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
5979     if (CC != ~0U) {
5980       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
5981       VPTPredicationCode = CC;
5982     }
5983     return Mnemonic;
5984   }
5985 
5986   // The "it" instruction has the condition mask on the end of the mnemonic.
5987   if (Mnemonic.startswith("it")) {
5988     ITMask = Mnemonic.slice(2, Mnemonic.size());
5989     Mnemonic = Mnemonic.slice(0, 2);
5990   }
5991 
5992   if (Mnemonic.startswith("vpst")) {
5993     ITMask = Mnemonic.slice(4, Mnemonic.size());
5994     Mnemonic = Mnemonic.slice(0, 4);
5995   }
5996   else if (Mnemonic.startswith("vpt")) {
5997     ITMask = Mnemonic.slice(3, Mnemonic.size());
5998     Mnemonic = Mnemonic.slice(0, 3);
5999   }
6000 
6001   return Mnemonic;
6002 }
6003 
6004 /// Given a canonical mnemonic, determine if the instruction ever allows
6005 /// inclusion of carry set or predication code operands.
6006 //
6007 // FIXME: It would be nice to autogen this.
6008 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
6009                                          StringRef ExtraToken,
6010                                          StringRef FullInst,
6011                                          bool &CanAcceptCarrySet,
6012                                          bool &CanAcceptPredicationCode,
6013                                          bool &CanAcceptVPTPredicationCode) {
6014   CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);
6015 
6016   CanAcceptCarrySet =
6017       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6018       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
6019       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
6020       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
6021       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
6022       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
6023       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
6024       (!isThumb() &&
6025        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
6026         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
6027 
6028   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
6029       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
6030       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
6031       Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
6032       Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
6033       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
6034       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
6035       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
6036       Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
6037       Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
6038       (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
6039       Mnemonic == "vmovx" || Mnemonic == "vins" ||
6040       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
6041       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
6042       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
6043       Mnemonic == "sb"    || Mnemonic == "ssbb"  ||
6044       Mnemonic == "pssbb" ||
6045       Mnemonic == "bfcsel" || Mnemonic == "wls" ||
6046       Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
6047       Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
6048       Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
6049       Mnemonic == "cset" || Mnemonic == "csetm" ||
6050       Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst")) {
6051     // These mnemonics are never predicable
6052     CanAcceptPredicationCode = false;
6053   } else if (!isThumb()) {
6054     // Some instructions are only predicable in Thumb mode
6055     CanAcceptPredicationCode =
6056         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
6057         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
6058         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
6059         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
6060         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
6061         Mnemonic != "stc2" && Mnemonic != "stc2l" &&
6062         Mnemonic != "tsb" &&
6063         !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
6064   } else if (isThumbOne()) {
6065     if (hasV6MOps())
6066       CanAcceptPredicationCode = Mnemonic != "movs";
6067     else
6068       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
6069   } else
6070     CanAcceptPredicationCode = true;
6071 }
6072 
// Some Thumb instructions have two-operand forms that are not available as
// three-operand forms; convert to the two-operand form where possible.
6075 //
6076 // FIXME: We would really like to be able to tablegen'erate this.
6077 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
6078                                                  bool CarrySetting,
6079                                                  OperandVector &Operands) {
6080   if (Operands.size() != 6)
6081     return;
6082 
6083   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6084         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
6085   if (!Op3.isReg() || !Op4.isReg())
6086     return;
6087 
6088   auto Op3Reg = Op3.getReg();
6089   auto Op4Reg = Op4.getReg();
6090 
  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here, taking care
  // with the immediate range in the 'add sp, sp, #imm' case.
6095   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
6096   if (isThumbTwo()) {
6097     if (Mnemonic != "add")
6098       return;
6099     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
6100                         (Op5.isReg() && Op5.getReg() == ARM::PC);
6101     if (!TryTransform) {
6102       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
6103                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
6104                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
6105                        Op5.isImm() && !Op5.isImm0_508s4());
6106     }
6107     if (!TryTransform)
6108       return;
6109   } else if (!isThumbOne())
6110     return;
6111 
6112   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
6113         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
6114         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
6115         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
6116     return;
6117 
  // If the first two operands of a three-operand instruction are the same
  // then transform to the two-operand version of the same instruction,
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'.
6121   bool Transform = Op3Reg == Op4Reg;
6122 
  // For commutative operations, we might be able to transform if we swap
6124   // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
6125   // as tADDrsp.
6126   const ARMOperand *LastOp = &Op5;
6127   bool Swap = false;
6128   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
6129       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
6130        Mnemonic == "and" || Mnemonic == "eor" ||
6131        Mnemonic == "adc" || Mnemonic == "orr")) {
6132     Swap = true;
6133     LastOp = &Op4;
6134     Transform = true;
6135   }
6136 
6137   // If both registers are the same then remove one of them from
6138   // the operand list, with certain exceptions.
6139   if (Transform) {
6140     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
6141     // 2 operand forms don't exist.
6142     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
6143         LastOp->isReg())
6144       Transform = false;
6145 
6146     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
6147     // 3-bits because the ARMARM says not to.
6148     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
6149       Transform = false;
6150   }
6151 
6152   if (Transform) {
6153     if (Swap)
6154       std::swap(Op4, Op5);
6155     Operands.erase(Operands.begin() + 3);
6156   }
6157 }
6158 
6159 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
6160                                           OperandVector &Operands) {
6161   // FIXME: This is all horribly hacky. We really need a better way to deal
6162   // with optional operands like this in the matcher table.
6163 
6164   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
6165   // another does not. Specifically, the MOVW instruction does not. So we
6166   // special case it here and remove the defaulted (non-setting) cc_out
6167   // operand if that's the instruction we're trying to match.
6168   //
6169   // We do this as post-processing of the explicit operands rather than just
6170   // conditionally adding the cc_out in the first place because we need
6171   // to check the type of the parsed immediate operand.
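  // For example, "mov r0, #0xabcd" has no modified-immediate encoding, so it
  // can only be MOVW and the cc_out operand must be removed.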
6172   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
6173       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
6174       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
6175       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6176     return true;
6177 
6178   // Register-register 'add' for thumb does not have a cc_out operand
6179   // when there are only two register operands.
6180   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
6181       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6182       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6183       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
6184     return true;
6185   // Register-register 'add' for thumb does not have a cc_out operand
6186   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
6187   // have to check the immediate range here since Thumb2 has a variant
6188   // that can handle a different range and has a cc_out operand.
6189   if (((isThumb() && Mnemonic == "add") ||
6190        (isThumbTwo() && Mnemonic == "sub")) &&
6191       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6192       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6193       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
6194       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6195       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
6196        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
6197     return true;
6198   // For Thumb2, add/sub immediate does not have a cc_out operand for the
6199   // imm0_4095 variant. That's the least-preferred variant when
6200   // selecting via the generic "add" mnemonic, so to know that we
6201   // should remove the cc_out operand, we have to explicitly check that
6202   // it's not one of the other variants. Ugh.
6203   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
6204       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6205       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6206       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6207     // Nest conditions rather than one big 'if' statement for readability.
6208     //
6209     // If both registers are low, we're in an IT block, and the immediate is
6210     // in range, we should use encoding T1 instead, which has a cc_out.
6211     if (inITBlock() &&
6212         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
6213         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
6214         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
6215       return false;
6216     // Check against T3. If the second register is the PC, this is an
6217     // alternate form of ADR, which uses encoding T4, so check for that too.
6218     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
6219         static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
6220       return false;
6221 
6222     // Otherwise, we use encoding T4, which does not have a cc_out
6223     // operand.
6224     return true;
6225   }
6226 
6227   // The thumb2 multiply instruction doesn't have a CCOut register, so
6228   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
6229   // use the 16-bit encoding or not.
6230   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
6231       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6232       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6233       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6234       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
6235       // If the registers aren't low regs, the destination reg isn't the
6236       // same as one of the source regs, or the cc_out operand is zero
6237       // outside of an IT block, we have to use the 32-bit encoding, so
6238       // remove the cc_out operand.
6239       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6240        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6241        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
6242        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6243                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
6244                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
6245                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
6246     return true;
6247 
6248   // Also check the 'mul' syntax variant that doesn't specify an explicit
6249   // destination register.
6250   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
6251       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6252       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6253       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6254       // If the registers aren't low regs  or the cc_out operand is zero
6255       // outside of an IT block, we have to use the 32-bit encoding, so
6256       // remove the cc_out operand.
6257       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
6258        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
6259        !inITBlock()))
6260     return true;
6261 
6262   // Register-register 'add/sub' for thumb does not have a cc_out operand
6263   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
6264   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
6265   // right, this will result in better diagnostics (which operand is off)
6266   // anyway.
6267   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
6268       (Operands.size() == 5 || Operands.size() == 6) &&
6269       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6270       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
6271       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
6272       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
6273        (Operands.size() == 6 &&
6274         static_cast<ARMOperand &>(*Operands[5]).isImm())))
6275     return true;
6276 
6277   return false;
6278 }
6279 
6280 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
6281                                               OperandVector &Operands) {
6282   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
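  // For example, "vrintz.f32 s0, s1" (VFP) keeps its predicate operand, while
  // "vrintz.f32 q0, q1" (NEON) must have it removed.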
6283   unsigned RegIdx = 3;
6284   if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
6285       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
6286        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
6287     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6288         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
6289          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
6290       RegIdx = 4;
6291 
6292     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
6293         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
6294              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
6295          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6296              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
6297       return true;
6298   }
6299   return false;
6300 }
6301 
6302 bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
6303                                                     OperandVector &Operands) {
6304   if (!hasMVE() || Operands.size() < 3)
6305     return true;
6306 
6307   for (auto &Operand : Operands) {
6308     // We check the larger class QPR instead of just the legal class
6309     // MQPR, to more accurately report errors when using Q registers
6310     // outside of the allowed range.
6311     if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
6312         (Operand->isReg() &&
6313          (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
6314              Operand->getReg()))))
6315       return false;
6316   }
6317   return true;
6318 }
6319 
6320 static bool isDataTypeToken(StringRef Tok) {
6321   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6322     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6323     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6324     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6325     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6326     Tok == ".f" || Tok == ".d";
6327 }
6328 
6329 // FIXME: This bit should probably be handled via an explicit match class
6330 // in the .td files that matches the suffix instead of having it be
6331 // a literal string token the way it is now.
6332 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6333   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
6334 }
6335 
6336 static void applyMnemonicAliases(StringRef &Mnemonic,
6337                                  const FeatureBitset &Features,
6338                                  unsigned VariantID);
6339 
6340 // The GNU assembler has aliases of ldrd and strd with the second register
6341 // omitted. We don't have a way to do that in tablegen, so fix it up here.
6342 //
// We have to be careful not to emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
// it. If we do find anything that prevents us from doing the transformation,
// we bail out and let the assembly parser report an error on the instruction
// as it is written.
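// For example, "ldrd r0, [r2]" is rewritten here to "ldrd r0, r1, [r2]".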
6348 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6349                                      OperandVector &Operands) {
6350   if (Mnemonic != "ldrd" && Mnemonic != "strd")
6351     return;
6352   if (Operands.size() < 4)
6353     return;
6354 
6355   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6356   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6357 
6358   if (!Op2.isReg())
6359     return;
6360   if (!Op3.isMem())
6361     return;
6362 
6363   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6364   if (!GPR.contains(Op2.getReg()))
6365     return;
6366 
6367   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6368   if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must be from an aligned pair; this
    // restriction does not apply in Thumb mode.
6371     return;
6372   }
6373   if (Op2.getReg() == ARM::PC)
6374     return;
6375   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6376   if (!PairedReg || PairedReg == ARM::PC ||
6377       (PairedReg == ARM::SP && !hasV8Ops()))
6378     return;
6379 
6380   Operands.insert(
6381       Operands.begin() + 3,
6382       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6383 }
6384 
/// Parse an ARM instruction mnemonic followed by its operands.
6386 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6387                                     SMLoc NameLoc, OperandVector &Operands) {
6388   MCAsmParser &Parser = getParser();
6389 
6390   // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
6392   // The generic tblgen'erated code does this later, at the start of
6393   // MatchInstructionImpl(), but that's too late for aliases that include
6394   // any sort of suffix.
6395   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6396   unsigned AssemblerDialect = getParser().getAssemblerDialect();
6397   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6398 
6399   // First check for the ARM-specific .req directive.
6400   if (Parser.getTok().is(AsmToken::Identifier) &&
6401       Parser.getTok().getIdentifier() == ".req") {
6402     parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
6405     return true;
6406   }
6407 
6408   // Create the leading tokens for the mnemonic, split by '.' characters.
6409   size_t Start = 0, Next = Name.find('.');
6410   StringRef Mnemonic = Name.slice(Start, Next);
6411   StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));
6412 
6413   // Split out the predication code and carry setting flag from the mnemonic.
6414   unsigned PredicationCode;
6415   unsigned VPTPredicationCode;
6416   unsigned ProcessorIMod;
6417   bool CarrySetting;
6418   StringRef ITMask;
6419   Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
6420                            CarrySetting, ProcessorIMod, ITMask);
6421 
6422   // In Thumb1, only the branch (B) instruction can be predicated.
6423   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6424     return Error(NameLoc, "conditional execution not supported in Thumb1");
6425   }
6426 
6427   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6428 
6429   // Handle the mask for IT and VPT instructions. In ARMOperand and
6430   // MCOperand, this is stored in a format independent of the
6431   // condition code: the lowest set bit indicates the end of the
  // encoding, and above that, a 1 bit indicates 'else', and a 0
6433   // indicates 'then'. E.g.
6434   //    IT    -> 1000
6435   //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
6436   //    ITxy  -> xy10    (e.g. ITET -> 1010)
6437   //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
6438   if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
6439       Mnemonic.startswith("vpst")) {
6440     SMLoc Loc = Mnemonic == "it"  ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
6441                 Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
6442                                     SMLoc::getFromPointer(NameLoc.getPointer() + 4);
6443     if (ITMask.size() > 3) {
6444       if (Mnemonic == "it")
6445         return Error(Loc, "too many conditions on IT instruction");
6446       return Error(Loc, "too many conditions on VPT instruction");
6447     }
6448     unsigned Mask = 8;
6449     for (unsigned i = ITMask.size(); i != 0; --i) {
6450       char pos = ITMask[i - 1];
6451       if (pos != 't' && pos != 'e') {
6452         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
6453       }
6454       Mask >>= 1;
6455       if (ITMask[i - 1] == 'e')
6456         Mask |= 8;
6457     }
6458     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
6459   }
6460 
6461   // FIXME: This is all a pretty gross hack. We should automatically handle
6462   // optional operands like this via tblgen.
6463 
6464   // Next, add the CCOut and ConditionCode operands, if needed.
6465   //
6466   // For mnemonics which can ever incorporate a carry setting bit or predication
6467   // code, our matching model involves us always generating CCOut and
6468   // ConditionCode operands to match the mnemonic "as written" and then we let
6469   // the matcher deal with finding the right instruction or generating an
6470   // appropriate error.
6471   bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
6472   getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
6473                         CanAcceptPredicationCode, CanAcceptVPTPredicationCode);
6474 
6475   // If we had a carry-set on an instruction that can't do that, issue an
6476   // error.
6477   if (!CanAcceptCarrySet && CarrySetting) {
6478     return Error(NameLoc, "instruction '" + Mnemonic +
6479                  "' can not set flags, but 's' suffix specified");
6480   }
6481   // If we had a predication code on an instruction that can't do that, issue an
6482   // error.
6483   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
6484     return Error(NameLoc, "instruction '" + Mnemonic +
6485                  "' is not predicable, but condition code specified");
6486   }
6487 
6488   // If we had a VPT predication code on an instruction that can't do that, issue an
6489   // error.
6490   if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
6491     return Error(NameLoc, "instruction '" + Mnemonic +
6492                  "' is not VPT predicable, but VPT code T/E is specified");
6493   }
6494 
6495   // Add the carry setting operand, if necessary.
6496   if (CanAcceptCarrySet) {
6497     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
6498     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
6499                                                Loc));
6500   }
6501 
6502   // Add the predication code operand, if necessary.
6503   if (CanAcceptPredicationCode) {
6504     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
6505                                       CarrySetting);
6506     Operands.push_back(ARMOperand::CreateCondCode(
6507                        ARMCC::CondCodes(PredicationCode), Loc));
6508   }
6509 
6510   // Add the VPT predication code operand, if necessary.
6511   if (CanAcceptVPTPredicationCode) {
6512     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
6513                                       CarrySetting);
6514     Operands.push_back(ARMOperand::CreateVPTPred(
6515                          ARMVCC::VPTCodes(VPTPredicationCode), Loc));
6516   }
6517 
6518   // Add the processor imod operand, if necessary.
6519   if (ProcessorIMod) {
6520     Operands.push_back(ARMOperand::CreateImm(
6521           MCConstantExpr::create(ProcessorIMod, getContext()),
6522                                  NameLoc, NameLoc));
6523   } else if (Mnemonic == "cps" && isMClass()) {
6524     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
6525   }
6526 
6527   // Add the remaining tokens in the mnemonic.
6528   while (Next != StringRef::npos) {
6529     Start = Next;
6530     Next = Name.find('.', Start + 1);
6531     ExtraToken = Name.slice(Start, Next);
6532 
6533     // Some NEON instructions have an optional datatype suffix that is
6534     // completely ignored. Check for that.
6535     if (isDataTypeToken(ExtraToken) &&
6536         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
6537       continue;
6538 
    // For ARM mode, generate an error if the .n qualifier is used.
6540     if (ExtraToken == ".n" && !isThumb()) {
6541       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6542       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
6543                    "arm mode");
6544     }
6545 
6546     // The .n qualifier is always discarded as that is what the tables
6547     // and matcher expect.  In ARM mode the .w qualifier has no effect,
6548     // so discard it to avoid errors that can be caused by the matcher.
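    // For example, "add.w r0, r1, r2" in ARM mode is treated as plain "add".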
6549     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
6550       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6551       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
6552     }
6553   }
6554 
6555   // Read the remaining operands.
6556   if (getLexer().isNot(AsmToken::EndOfStatement)) {
6557     // Read the first operand.
6558     if (parseOperand(Operands, Mnemonic)) {
6559       return true;
6560     }
6561 
6562     while (parseOptionalToken(AsmToken::Comma)) {
6563       // Parse and remember the operand.
6564       if (parseOperand(Operands, Mnemonic)) {
6565         return true;
6566       }
6567     }
6568   }
6569 
6570   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
6571     return true;
6572 
6573   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
6574 
6575   // Some instructions, mostly Thumb, have forms for the same mnemonic that
6576   // do and don't have a cc_out optional-def operand. With some spot-checks
6577   // of the operand list, we can figure out which variant we're trying to
6578   // parse and adjust accordingly before actually matching. We shouldn't ever
6579   // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason #317 why the
  // table-driven matcher doesn't fit well with the ARM instruction set.
6582   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
6583     Operands.erase(Operands.begin() + 1);
6584 
6585   // Some instructions have the same mnemonic, but don't always
6586   // have a predicate. Distinguish them here and delete the
6587   // appropriate predicate if needed.  This could be either the scalar
6588   // predication code or the vector predication code.
6589   if (PredicationCode == ARMCC::AL &&
6590       shouldOmitPredicateOperand(Mnemonic, Operands))
6591     Operands.erase(Operands.begin() + 1);
6592 
6593 
6594   if (hasMVE()) {
6595     if (CanAcceptVPTPredicationCode) {
      // Make sure only one of the two predication operands is left behind,
      // depending on whether we should use vector predication.
6599       if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
6600         if (CanAcceptPredicationCode)
6601           Operands.erase(Operands.begin() + 2);
6602         else
6603           Operands.erase(Operands.begin() + 1);
6604       } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
6605         Operands.erase(Operands.begin() + 1);
6606       }
6607     }
6608   }
6609 
6610   if (VPTPredicationCode != ARMVCC::None) {
6611     bool usedVPTPredicationCode = false;
6612     for (unsigned I = 1; I < Operands.size(); ++I)
6613       if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
6614         usedVPTPredicationCode = true;
6615     if (!usedVPTPredicationCode) {
6616       // If we have a VPT predication code and we haven't just turned it
6617       // into an operand, then it was a mistake for splitMnemonic to
6618       // separate it from the rest of the mnemonic in the first place,
6619       // and this may lead to wrong disassembly (e.g. scalar floating
6620       // point VCMPE is actually a different instruction from VCMP, so
6621       // we mustn't treat them the same). In that situation, glue it
6622       // back on.
6623       Mnemonic = Name.slice(0, Mnemonic.size() + 1);
6624       Operands.erase(Operands.begin());
6625       Operands.insert(Operands.begin(),
6626                       ARMOperand::CreateToken(Mnemonic, NameLoc));
6627     }
6628   }
6629 
  // ARM mode 'blx' needs special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand &>(*Operands[2]).isImm())
    Operands.erase(Operands.begin() + 1);
6638 
  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require an even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GPRs cannot automatically
  // be expressed as a GPRPair, so we have to merge them manually here.
  // FIXME: We would really like to be able to tablegen'erate this.
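  // For example, "ldrexd r0, r1, [r2]" has its r0 and r1 operands replaced
  // below by the single GPRPair register covering both.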
  if (!isThumb() && Operands.size() > 4 &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
    unsigned Idx = isLoad ? 2 : 3;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
    // Adjust only if Op1 and Op2 are GPRs.
    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
        MRC.contains(Op2.getReg())) {
      unsigned Reg1 = Op1.getReg();
      unsigned Reg2 = Op2.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg1);
      unsigned Rt2 = MRI->getEncodingValue(Reg2);

      // Rt2 must be Rt + 1 and Rt must be even.
      if (Rt + 1 != Rt2 || (Rt & 1)) {
        return Error(Op2.getStartLoc(),
                     isLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");
      }
      unsigned NewReg = MRI->getMatchingSuperReg(
          Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
      Operands[Idx] =
          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
      Operands.erase(Operands.begin() + Idx + 1);
    }
6675   }
6676 
6677   // GNU Assembler extension (compatibility).
6678   fixupGNULDRDAlias(Mnemonic, Operands);
6679 
  // FIXME: As said above, this is all a pretty gross hack.  This instruction
  // does not fit with the other "subs" instructions and tblgen.
  // Adjust the operands of the B9.3.19 SUBS PC, LR, #imm (Thumb2) system
  // instruction so the mnemonic is the original name "subs", and delete the
  // predicate operand so it will match the table entry.
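  // For example, "subs pc, lr, #4" is matched via this path.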
6685   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
6686       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6687       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
6688       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6689       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
6690       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6691     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
6692     Operands.erase(Operands.begin() + 1);
6693   }
6694   return false;
6695 }
6696 
6697 // Validate context-sensitive operand constraints.
6698 
// Return 'true' if the register list contains non-low GPR registers (other
// than HiReg), 'false' otherwise. If Reg is in the register list, set
// 'containsReg' to true.
6702 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6703                                  unsigned Reg, unsigned HiReg,
6704                                  bool &containsReg) {
6705   containsReg = false;
6706   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6707     unsigned OpReg = Inst.getOperand(i).getReg();
6708     if (OpReg == Reg)
6709       containsReg = true;
6710     // Anything other than a low register isn't legal here.
6711     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6712       return true;
6713   }
6714   return false;
6715 }
6716 
// Check if the specified register is in the register list of the
// instruction, starting at the indicated operand number.
6719 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6720   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6721     unsigned OpReg = Inst.getOperand(i).getReg();
6722     if (OpReg == Reg)
6723       return true;
6724   }
6725   return false;
6726 }
6727 
6728 // Return true if instruction has the interesting property of being
6729 // allowed in IT blocks, but not being predicable.
6730 static bool instIsBreakpoint(const MCInst &Inst) {
6731     return Inst.getOpcode() == ARM::tBKPT ||
6732            Inst.getOpcode() == ARM::BKPT ||
6733            Inst.getOpcode() == ARM::tHLT ||
6734            Inst.getOpcode() == ARM::HLT;
6735 }
6736 
6737 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6738                                        const OperandVector &Operands,
6739                                        unsigned ListNo, bool IsARPop) {
6740   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6741   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6742 
6743   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6744   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6745   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6746 
6747   if (!IsARPop && ListContainsSP)
6748     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6749                  "SP may not be in the register list");
6750   else if (ListContainsPC && ListContainsLR)
6751     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6752                  "PC and LR may not be in the register list simultaneously");
6753   return false;
6754 }
6755 
6756 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6757                                        const OperandVector &Operands,
6758                                        unsigned ListNo) {
6759   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6760   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6761 
6762   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6763   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6764 
6765   if (ListContainsSP && ListContainsPC)
6766     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6767                  "SP and PC may not be in the register list");
6768   else if (ListContainsSP)
6769     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6770                  "SP may not be in the register list");
6771   else if (ListContainsPC)
6772     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6773                  "PC may not be in the register list");
6774   return false;
6775 }
6776 
6777 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
6778                                     const OperandVector &Operands,
6779                                     bool Load, bool ARMMode, bool Writeback) {
6780   unsigned RtIndex = Load || !Writeback ? 0 : 1;
6781   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
6782   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
6783 
6784   if (ARMMode) {
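    // For example, "ldrd r0, r1, [r2]" passes these checks, while
    // "ldrd r1, r2, [r3]" is rejected because Rt must be even-numbered.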
6785     // Rt can't be R14.
6786     if (Rt == 14)
6787       return Error(Operands[3]->getStartLoc(),
6788                   "Rt can't be R14");
6789 
6790     // Rt must be even-numbered.
6791     if ((Rt & 1) == 1)
6792       return Error(Operands[3]->getStartLoc(),
6793                    "Rt must be even-numbered");
6794 
6795     // Rt2 must be Rt + 1.
6796     if (Rt2 != Rt + 1) {
6797       if (Load)
6798         return Error(Operands[3]->getStartLoc(),
6799                      "destination operands must be sequential");
6800       else
6801         return Error(Operands[3]->getStartLoc(),
6802                      "source operands must be sequential");
6803     }
6804 
6805     // FIXME: Diagnose m == 15
6806     // FIXME: Diagnose ldrd with m == t || m == t2.
6807   }
6808 
6809   if (!ARMMode && Load) {
6810     if (Rt2 == Rt)
6811       return Error(Operands[3]->getStartLoc(),
6812                    "destination operands can't be identical");
6813   }
6814 
6815   if (Writeback) {
6816     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6817 
6818     if (Rn == Rt || Rn == Rt2) {
6819       if (Load)
6820         return Error(Operands[3]->getStartLoc(),
6821                      "base register needs to be different from destination "
6822                      "registers");
6823       else
6824         return Error(Operands[3]->getStartLoc(),
6825                      "source register and base register can't be identical");
6826     }
6827 
6828     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
6829     // (Except the immediate form of ldrd?)
6830   }
6831 
6832   return false;
6833 }
6834 
6835 static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
6836   for (unsigned i = 0; i < MCID.NumOperands; ++i) {
6837     if (ARM::isVpred(MCID.OpInfo[i].OperandType))
6838       return i;
6839   }
6840   return -1;
6841 }
6842 
6843 static bool isVectorPredicable(const MCInstrDesc &MCID) {
6844   return findFirstVectorPredOperandIdx(MCID) != -1;
6845 }
6846 
6847 // FIXME: We would really like to be able to tablegen'erate this.
6848 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6849                                        const OperandVector &Operands) {
6850   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6851   SMLoc Loc = Operands[0]->getStartLoc();
6852 
6853   // Check the IT block state first.
6854   // NOTE: BKPT and HLT instructions have the interesting property of being
6855   // allowed in IT blocks, but not being predicable. They just always execute.
6856   if (inITBlock() && !instIsBreakpoint(Inst)) {
6857     // The instruction must be predicable.
6858     if (!MCID.isPredicable())
6859       return Error(Loc, "instructions in IT block must be predicable");
6860     ARMCC::CondCodes Cond = ARMCC::CondCodes(
6861         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
6862     if (Cond != currentITCond()) {
6863       // Find the condition code Operand to get its SMLoc information.
6864       SMLoc CondLoc;
6865       for (unsigned I = 1; I < Operands.size(); ++I)
6866         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6867           CondLoc = Operands[I]->getStartLoc();
6868       return Error(CondLoc, "incorrect condition in IT block; got '" +
6869                                 StringRef(ARMCondCodeToString(Cond)) +
6870                                 "', but expected '" +
6871                                 ARMCondCodeToString(currentITCond()) + "'");
6872     }
6873   // Check for non-'al' condition codes outside of the IT block.
6874   } else if (isThumbTwo() && MCID.isPredicable() &&
6875              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6876              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6877              Inst.getOpcode() != ARM::t2Bcc &&
6878              Inst.getOpcode() != ARM::t2BFic) {
6879     return Error(Loc, "predicated instructions must be in IT block");
6880   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
6881              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6882                  ARMCC::AL) {
6883     return Warning(Loc, "predicated instructions should be in IT block");
6884   } else if (!MCID.isPredicable()) {
6885     // Check the instruction doesn't have a predicate operand anyway
6886     // that it's not allowed to use. Sometimes this happens in order
6887     // to keep instructions the same shape even though one cannot
6888     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
6889     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
6890       if (MCID.OpInfo[i].isPredicate()) {
6891         if (Inst.getOperand(i).getImm() != ARMCC::AL)
6892           return Error(Loc, "instruction is not predicable");
6893         break;
6894       }
6895     }
6896   }
6897 
6898   // PC-setting instructions in an IT block, but not the last instruction of
6899   // the block, are UNPREDICTABLE.
6900   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
6901     return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
6902   }
6903 
6904   if (inVPTBlock() && !instIsBreakpoint(Inst)) {
6905     unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
6906     if (!isVectorPredicable(MCID))
6907       return Error(Loc, "instruction in VPT block must be predicable");
    unsigned Pred =
        Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
6909     unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
6910     if (Pred != VPTPred) {
6911       SMLoc PredLoc;
6912       for (unsigned I = 1; I < Operands.size(); ++I)
6913         if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
6914           PredLoc = Operands[I]->getStartLoc();
6915       return Error(PredLoc, "incorrect predication in VPT block; got '" +
6916                    StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
6917                    "', but expected '" +
6918                    ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
6919     }
  } else if (isVectorPredicable(MCID) &&
             Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
                 ARMVCC::None)
6924     return Error(Loc, "VPT predicated instructions must be in VPT block");
6925 
6926   const unsigned Opcode = Inst.getOpcode();
6927   switch (Opcode) {
6928   case ARM::t2IT: {
6929     // Encoding is unpredictable if it ever results in a notional 'NV'
6930     // predicate. Since we don't parse 'NV' directly this means an 'AL'
6931     // predicate with an "else" mask bit.
6932     unsigned Cond = Inst.getOperand(0).getImm();
6933     unsigned Mask = Inst.getOperand(1).getImm();
6934 
6935     // Conditions only allowing a 't' are those with no set bit except
6936     // the lowest-order one that indicates the end of the sequence. In
6937     // other words, powers of 2.
6938     if (Cond == ARMCC::AL && countPopulation(Mask) != 1)
6939       return Error(Loc, "unpredictable IT predicate sequence");
6940     break;
6941   }
6942   case ARM::LDRD:
6943     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
6944                          /*Writeback*/false))
6945       return true;
6946     break;
6947   case ARM::LDRD_PRE:
6948   case ARM::LDRD_POST:
6949     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
6950                          /*Writeback*/true))
6951       return true;
6952     break;
6953   case ARM::t2LDRDi8:
6954     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
6955                          /*Writeback*/false))
6956       return true;
6957     break;
6958   case ARM::t2LDRD_PRE:
6959   case ARM::t2LDRD_POST:
6960     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
6961                          /*Writeback*/true))
6962       return true;
6963     break;
6964   case ARM::t2BXJ: {
6965     const unsigned RmReg = Inst.getOperand(0).getReg();
6966     // Rm = SP is no longer unpredictable in v8-A
6967     if (RmReg == ARM::SP && !hasV8Ops())
6968       return Error(Operands[2]->getStartLoc(),
6969                    "r13 (SP) is an unpredictable operand to BXJ");
6970     return false;
6971   }
6972   case ARM::STRD:
6973     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
6974                          /*Writeback*/false))
6975       return true;
6976     break;
6977   case ARM::STRD_PRE:
6978   case ARM::STRD_POST:
6979     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
6980                          /*Writeback*/true))
6981       return true;
6982     break;
6983   case ARM::t2STRD_PRE:
6984   case ARM::t2STRD_POST:
6985     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
6986                          /*Writeback*/true))
6987       return true;
6988     break;
6989   case ARM::STR_PRE_IMM:
6990   case ARM::STR_PRE_REG:
6991   case ARM::t2STR_PRE:
6992   case ARM::STR_POST_IMM:
6993   case ARM::STR_POST_REG:
6994   case ARM::t2STR_POST:
6995   case ARM::STRH_PRE:
6996   case ARM::t2STRH_PRE:
6997   case ARM::STRH_POST:
6998   case ARM::t2STRH_POST:
6999   case ARM::STRB_PRE_IMM:
7000   case ARM::STRB_PRE_REG:
7001   case ARM::t2STRB_PRE:
7002   case ARM::STRB_POST_IMM:
7003   case ARM::STRB_POST_REG:
7004   case ARM::t2STRB_POST: {
7005     // Rt must be different from Rn.
7006     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7007     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7008 
7009     if (Rt == Rn)
7010       return Error(Operands[3]->getStartLoc(),
7011                    "source register and base register can't be identical");
7012     return false;
7013   }
7014   case ARM::LDR_PRE_IMM:
7015   case ARM::LDR_PRE_REG:
7016   case ARM::t2LDR_PRE:
7017   case ARM::LDR_POST_IMM:
7018   case ARM::LDR_POST_REG:
7019   case ARM::t2LDR_POST:
7020   case ARM::LDRH_PRE:
7021   case ARM::t2LDRH_PRE:
7022   case ARM::LDRH_POST:
7023   case ARM::t2LDRH_POST:
7024   case ARM::LDRSH_PRE:
7025   case ARM::t2LDRSH_PRE:
7026   case ARM::LDRSH_POST:
7027   case ARM::t2LDRSH_POST:
7028   case ARM::LDRB_PRE_IMM:
7029   case ARM::LDRB_PRE_REG:
7030   case ARM::t2LDRB_PRE:
7031   case ARM::LDRB_POST_IMM:
7032   case ARM::LDRB_POST_REG:
7033   case ARM::t2LDRB_POST:
7034   case ARM::LDRSB_PRE:
7035   case ARM::t2LDRSB_PRE:
7036   case ARM::LDRSB_POST:
7037   case ARM::t2LDRSB_POST: {
7038     // Rt must be different from Rn.
7039     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7040     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7041 
7042     if (Rt == Rn)
7043       return Error(Operands[3]->getStartLoc(),
7044                    "destination register and base register can't be identical");
7045     return false;
7046   }
7047   case ARM::SBFX:
7048   case ARM::t2SBFX:
7049   case ARM::UBFX:
7050   case ARM::t2UBFX: {
7051     // Width must be in range [1, 32-lsb].
7052     unsigned LSB = Inst.getOperand(2).getImm();
7053     unsigned Widthm1 = Inst.getOperand(3).getImm();
7054     if (Widthm1 >= 32 - LSB)
7055       return Error(Operands[5]->getStartLoc(),
7056                    "bitfield width must be in range [1,32-lsb]");
7057     return false;
7058   }
7059   // Notionally handles ARM::tLDMIA_UPD too.
7060   case ARM::tLDMIA: {
7061     // If we're parsing Thumb2, the .w variant is available and handles
7062     // most cases that are normally illegal for a Thumb1 LDM instruction.
7063     // We'll make the transformation in processInstruction() if necessary.
7064     //
7065     // Thumb LDM instructions are writeback iff the base register is not
7066     // in the register list.
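    // For example, "ldm r0!, {r1, r2}" writes back, whereas
    // "ldm r0, {r0, r1}" does not (and must not use '!').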
7067     unsigned Rn = Inst.getOperand(0).getReg();
7068     bool HasWritebackToken =
7069         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
7070          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
7071     bool ListContainsBase;
7072     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
7073       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
7074                    "registers must be in range r0-r7");
7075     // If we should have writeback, then there should be a '!' token.
7076     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
7077       return Error(Operands[2]->getStartLoc(),
7078                    "writeback operator '!' expected");
7079     // If we should not have writeback, there must not be a '!'. This is
7080     // true even for the 32-bit wide encodings.
7081     if (ListContainsBase && HasWritebackToken)
7082       return Error(Operands[3]->getStartLoc(),
7083                    "writeback operator '!' not allowed when base register "
7084                    "in register list");
7085 
7086     if (validatetLDMRegList(Inst, Operands, 3))
7087       return true;
7088     break;
7089   }
7090   case ARM::LDMIA_UPD:
7091   case ARM::LDMDB_UPD:
7092   case ARM::LDMIB_UPD:
7093   case ARM::LDMDA_UPD:
7094     // ARM variants loading and updating the same register are only officially
7095     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
7096     if (!hasV7Ops())
7097       break;
7098     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7099       return Error(Operands.back()->getStartLoc(),
7100                    "writeback register not allowed in register list");
7101     break;
7102   case ARM::t2LDMIA:
7103   case ARM::t2LDMDB:
7104     if (validatetLDMRegList(Inst, Operands, 3))
7105       return true;
7106     break;
7107   case ARM::t2STMIA:
7108   case ARM::t2STMDB:
7109     if (validatetSTMRegList(Inst, Operands, 3))
7110       return true;
7111     break;
7112   case ARM::t2LDMIA_UPD:
7113   case ARM::t2LDMDB_UPD:
7114   case ARM::t2STMIA_UPD:
7115   case ARM::t2STMDB_UPD:
7116     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
7117       return Error(Operands.back()->getStartLoc(),
7118                    "writeback register not allowed in register list");
7119 
7120     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
7121       if (validatetLDMRegList(Inst, Operands, 3))
7122         return true;
7123     } else {
7124       if (validatetSTMRegList(Inst, Operands, 3))
7125         return true;
7126     }
7127     break;
7128 
7129   case ARM::sysLDMIA_UPD:
7130   case ARM::sysLDMDA_UPD:
7131   case ARM::sysLDMDB_UPD:
7132   case ARM::sysLDMIB_UPD:
7133     if (!listContainsReg(Inst, 3, ARM::PC))
7134       return Error(Operands[4]->getStartLoc(),
7135                    "writeback register only allowed on system LDM "
7136                    "if PC in register-list");
7137     break;
7138   case ARM::sysSTMIA_UPD:
7139   case ARM::sysSTMDA_UPD:
7140   case ARM::sysSTMDB_UPD:
7141   case ARM::sysSTMIB_UPD:
7142     return Error(Operands[2]->getStartLoc(),
7143                  "system STM cannot have writeback register");
7144   case ARM::tMUL:
7145     // The second source operand must be the same register as the destination
7146     // operand.
7147     //
7148     // In this case, we must directly check the parsed operands because the
7149     // cvtThumbMultiply() function is written in such a way that it guarantees
7150     // this first statement is always true for the new Inst.  Essentially, the
7151     // destination is unconditionally copied into the second source operand
7152     // without checking to see if it matches what we actually parsed.
7153     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
7154                                  ((ARMOperand &)*Operands[5]).getReg()) &&
7155         (((ARMOperand &)*Operands[3]).getReg() !=
7156          ((ARMOperand &)*Operands[4]).getReg())) {
7157       return Error(Operands[3]->getStartLoc(),
7158                    "destination register must match source register");
7159     }
7160     break;
7161 
  // Like ldm/stm, push and pop have hi-reg handling versions in Thumb2,
  // so only issue a diagnostic for Thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
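  // For example, "push {r8}" and "pop {r8}" are only encodable with the wide
  // Thumb2 forms.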
7165   case ARM::tPOP: {
7166     bool ListContainsBase;
7167     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
7168         !isThumbTwo())
7169       return Error(Operands[2]->getStartLoc(),
7170                    "registers must be in range r0-r7 or pc");
7171     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
7172       return true;
7173     break;
7174   }
7175   case ARM::tPUSH: {
7176     bool ListContainsBase;
7177     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
7178         !isThumbTwo())
7179       return Error(Operands[2]->getStartLoc(),
7180                    "registers must be in range r0-r7 or lr");
7181     if (validatetSTMRegList(Inst, Operands, 2))
7182       return true;
7183     break;
7184   }
7185   case ARM::tSTMIA_UPD: {
7186     bool ListContainsBase, InvalidLowList;
7187     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
7188                                           0, ListContainsBase);
7189     if (InvalidLowList && !isThumbTwo())
7190       return Error(Operands[4]->getStartLoc(),
7191                    "registers must be in range r0-r7");
7192 
7193     // This would be converted to a 32-bit stm, but that's not valid if the
7194     // writeback register is in the list.
7195     if (InvalidLowList && ListContainsBase)
7196       return Error(Operands[4]->getStartLoc(),
7197                    "writeback operator '!' not allowed when base register "
7198                    "in register list");
7199 
7200     if (validatetSTMRegList(Inst, Operands, 4))
7201       return true;
7202     break;
7203   }
7204   case ARM::tADDrSP:
7205     // If the non-SP source operand and the destination operand are not the
7206     // same, we need thumb2 (for the wide encoding), or we have an error.
7207     if (!isThumbTwo() &&
7208         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7209       return Error(Operands[4]->getStartLoc(),
7210                    "source register must be the same as destination");
7211     }
7212     break;
7213 
7214   case ARM::t2ADDri:
7215   case ARM::t2ADDri12:
7216   case ARM::t2ADDrr:
7217   case ARM::t2ADDrs:
7218   case ARM::t2SUBri:
7219   case ARM::t2SUBri12:
7220   case ARM::t2SUBrr:
7221   case ARM::t2SUBrs:
7222     if (Inst.getOperand(0).getReg() == ARM::SP &&
7223         Inst.getOperand(1).getReg() != ARM::SP)
7224       return Error(Operands[4]->getStartLoc(),
7225                    "source register must be sp if destination is sp");
7226     break;
7227 
7228   // Final range checking for Thumb unconditional branch instructions.
7229   case ARM::tB:
7230     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
7231       return Error(Operands[2]->getStartLoc(), "branch target out of range");
7232     break;
7233   case ARM::t2B: {
7234     int op = (Operands[2]->isImm()) ? 2 : 3;
7235     if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
7236       return Error(Operands[op]->getStartLoc(), "branch target out of range");
7237     break;
7238   }
7239   // Final range checking for Thumb conditional branch instructions.
7240   case ARM::tBcc:
7241     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
7242       return Error(Operands[2]->getStartLoc(), "branch target out of range");
7243     break;
7244   case ARM::t2Bcc: {
7245     int Op = (Operands[2]->isImm()) ? 2 : 3;
7246     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
7247       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
7248     break;
7249   }
7250   case ARM::tCBZ:
7251   case ARM::tCBNZ: {
7252     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
7253       return Error(Operands[2]->getStartLoc(), "branch target out of range");
7254     break;
7255   }
7256   case ARM::MOVi16:
7257   case ARM::MOVTi16:
7258   case ARM::t2MOVi16:
7259   case ARM::t2MOVTi16:
7260     {
7261     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
7262     // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16: as part of the expression.  We don't
7264     // want the behavior of silently truncating, which can be unexpected and
7265     // lead to bugs that are difficult to find since this is an easy mistake
7266     // to make.
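    // For example, "movw r0, #:lower16:sym" is accepted, while a bare
    // "movw r0, sym" is rejected rather than silently truncated.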
7267     int i = (Operands[3]->isImm()) ? 3 : 4;
7268     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
7269     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
7270     if (CE) break;
7271     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
7272     if (!E) break;
7273     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
7274     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
7275                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
7276       return Error(
7277           Op.getStartLoc(),
7278           "immediate expression for mov requires :lower16: or :upper16");
7279     break;
7280   }
7281   case ARM::HINT:
7282   case ARM::t2HINT: {
7283     unsigned Imm8 = Inst.getOperand(0).getImm();
7284     unsigned Pred = Inst.getOperand(1).getImm();
7285     // ESB is not predicable (pred must be AL). Without the RAS extension, this
7286     // behaves as any other unallocated hint.
7287     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
7288       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
7289                                                "predicable, but condition "
7290                                                "code specified");
7291     if (Imm8 == 0x14 && Pred != ARMCC::AL)
7292       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
7293                                                "predicable, but condition "
7294                                                "code specified");
7295     break;
7296   }
7297   case ARM::t2WLS: {
7298     int idx = Opcode == ARM::t2WLS ? 3 : 4;
7299     if (!static_cast<ARMOperand &>(*Operands[idx]).isUnsignedOffset<11, 1>())
7300       return Error(Operands[idx]->getStartLoc(),
7301                    "loop end is out of range or not a positive multiple of 2");
7302     break;
7303   }
7304   case ARM::t2LEUpdate: {
7305     if (Inst.getOperand(2).isImm() &&
7306         !(Inst.getOperand(2).getImm() < 0 &&
7307           Inst.getOperand(2).getImm() >= -4094 &&
7308           (Inst.getOperand(2).getImm() & 1) == 0))
7309       return Error(Operands[2]->getStartLoc(),
7310                    "loop start is out of range or not a negative multiple of 2");
7311     break;
7312   }
7313   case ARM::t2BFi:
7314   case ARM::t2BFr:
7315   case ARM::t2BFLi:
7316   case ARM::t2BFLr: {
7317     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
7318         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
7319       return Error(Operands[2]->getStartLoc(),
7320                    "branch location out of range or not a multiple of 2");
7321 
7322     if (Opcode == ARM::t2BFi) {
7323       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
7324         return Error(Operands[3]->getStartLoc(),
7325                      "branch target out of range or not a multiple of 2");
7326     } else if (Opcode == ARM::t2BFLi) {
7327       if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
7328         return Error(Operands[3]->getStartLoc(),
7329                      "branch target out of range or not a multiple of 2");
7330     }
7331     break;
7332   }
7333   case ARM::t2BFic: {
7334     if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
7335         (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
7336       return Error(Operands[1]->getStartLoc(),
7337                    "branch location out of range or not a multiple of 2");
7338 
7339     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
7340       return Error(Operands[2]->getStartLoc(),
7341                    "branch target out of range or not a multiple of 2");
7342 
7343     assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
7344            "branch location and else branch target should either both be "
7345            "immediates or both labels");
7346 
7347     if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
7348       int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
7349       if (Diff != 4 && Diff != 2)
7350         return Error(
7351             Operands[3]->getStartLoc(),
7352             "else branch target must be 2 or 4 greater than the branch location");
7353     }
7354     break;
7355   }
7356   case ARM::t2CLRM: {
7357     for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
7358       if (Inst.getOperand(i).isReg() &&
7359           !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
7360               Inst.getOperand(i).getReg())) {
7361         return Error(Operands[2]->getStartLoc(),
7362                      "invalid register in register list. Valid registers are "
7363                      "r0-r12, lr/r14 and APSR.");
7364       }
7365     }
7366     break;
7367   }
7368   case ARM::DSB:
7369   case ARM::t2DSB: {
7370 
7371     if (Inst.getNumOperands() < 2)
7372       break;
7373 
7374     unsigned Option = Inst.getOperand(0).getImm();
7375     unsigned Pred = Inst.getOperand(1).getImm();
7376 
7377     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
7378     if (Option == 0 && Pred != ARMCC::AL)
7379       return Error(Operands[1]->getStartLoc(),
7380                    "instruction 'ssbb' is not predicable, but condition code "
7381                    "specified");
7382     if (Option == 4 && Pred != ARMCC::AL)
7383       return Error(Operands[1]->getStartLoc(),
7384                    "instruction 'pssbb' is not predicable, but condition code "
7385                    "specified");
7386     break;
7387   }
7388   case ARM::VMOVRRS: {
7389     // Source registers must be sequential.
7390     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
7391     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
7392     if (Sm1 != Sm + 1)
7393       return Error(Operands[5]->getStartLoc(),
7394                    "source operands must be sequential");
7395     break;
7396   }
7397   case ARM::VMOVSRR: {
7398     // Destination registers must be sequential.
7399     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
7400     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
7401     if (Sm1 != Sm + 1)
7402       return Error(Operands[3]->getStartLoc(),
7403                    "destination operands must be sequential");
7404     break;
7405   }
7406   case ARM::VLDMDIA:
7407   case ARM::VSTMDIA: {
7408     ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
7409     auto &RegList = Op.getRegList();
7410     if (RegList.size() < 1 || RegList.size() > 16)
7411       return Error(Operands[3]->getStartLoc(),
7412                    "list of registers must be at least 1 and at most 16");
7413     break;
7414   }
7415   }
7416 
7417   return false;
7418 }
7419 
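// Map a VST "complex alias" pseudo-opcode (the *_Asm_* forms accepted by the
// matcher) to the real NEON VST opcode, and report the register-list spacing
// (1 for consecutive D registers, 2 for even/odd-spaced lists) so the caller
// can synthesize the remaining list operands.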
7420 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
7421   switch(Opc) {
7422   default: llvm_unreachable("unexpected opcode!");
7423   // VST1LN
7424   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
7425   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
7426   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
7427   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
7428   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
7429   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
7430   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
7431   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
7432   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
7433 
7434   // VST2LN
7435   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
7436   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
7437   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
7438   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
7439   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
7440 
7441   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
7442   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
7443   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
7444   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
7445   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
7446 
7447   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
7448   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
7449   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
7450   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
7451   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
7452 
7453   // VST3LN
7454   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
7455   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
7456   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
7457   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
7458   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
7459   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
7460   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
7461   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
7462   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
7463   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
7464   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
7465   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
7466   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
7467   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
7468   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
7469 
7470   // VST3
7471   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
7472   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
7473   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
7474   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
7475   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
7476   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
7477   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
7478   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
7479   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
7480   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
7481   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
7482   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
7483   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
7484   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
7485   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
7486   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
7487   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
7488   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
7489 
7490   // VST4LN
7491   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
7492   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
7493   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
7494   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
7495   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
7496   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
7497   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
7498   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
7499   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
7500   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
7501   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
7502   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
7503   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
7504   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
7505   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
7506 
7507   // VST4
7508   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
7509   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
7510   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
7511   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
7512   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
7513   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
7514   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
7515   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
7516   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
7517   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
7518   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
7519   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
7520   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
7521   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
7522   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
7523   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
7524   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
7525   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
7526   }
7527 }
7528 
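// Same as getRealVSTOpcode, but for the VLD "complex alias" pseudo-opcodes.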
7529 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
7530   switch(Opc) {
7531   default: llvm_unreachable("unexpected opcode!");
7532   // VLD1LN
7533   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
7534   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
7535   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
7536   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
7537   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
7538   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
7539   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
7540   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
7541   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
7542 
7543   // VLD2LN
7544   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
7545   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
7546   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
7547   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
7548   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
7549   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
7550   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
7551   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
7552   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
7553   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
7554   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
7555   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
7556   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
7557   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
7558   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
7559 
7560   // VLD3DUP
7561   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
7562   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
7563   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
7564   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
7565   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
7566   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
7567   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
7568   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
7569   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
7570   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
7571   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
7572   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
7573   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
7574   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
7575   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
7576   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
7577   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
7578   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
7579 
7580   // VLD3LN
7581   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
7582   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
7583   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
7584   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
7585   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
7586   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
7587   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
7588   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
7589   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
7590   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
7591   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
7592   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
7593   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
7594   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
7595   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
7596 
7597   // VLD3
7598   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
7599   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
7600   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
7601   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
7602   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
7603   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
7604   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
7605   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
7606   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
7607   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
7608   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
7609   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
7610   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
7611   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
7612   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
7613   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
7614   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
7615   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
7616 
7617   // VLD4LN
7618   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
7619   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
7620   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
7621   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
7622   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
7623   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
7624   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
7625   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
7626   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
7627   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
7628   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
7629   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
7630   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
7631   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
7632   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
7633 
7634   // VLD4DUP
7635   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
7636   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
7637   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
7638   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
7639   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
7640   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
7641   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
7642   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
7643   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
7644   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
7645   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
7646   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
7647   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
7648   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
7649   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
7650   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
7651   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
7652   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
7653 
7654   // VLD4
7655   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
7656   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
7657   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
7658   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
7659   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
7660   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
7661   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
7662   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
7663   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
7664   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
7665   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
7666   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
7667   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
7668   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
7669   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
7670   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
7671   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
7672   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
7673   }
7674 }
7675 
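// Rewrite pseudo and alias MCInsts produced by the matcher into real,
// encodable instructions. Returns true if Inst was changed, so the caller can
// re-process the rewritten instruction and let transformations chain.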
7676 bool ARMAsmParser::processInstruction(MCInst &Inst,
7677                                       const OperandVector &Operands,
7678                                       MCStreamer &Out) {
7679   // Check if we have the wide qualifier, because if it's present we
7680   // must avoid selecting a 16-bit thumb instruction.
7681   bool HasWideQualifier = false;
7682   for (auto &Op : Operands) {
7683     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
7684     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
7685       HasWideQualifier = true;
7686       break;
7687     }
7688   }
7689 
7690   switch (Inst.getOpcode()) {
7691   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
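  // For example, "ldrt r0, [r1]" with no post-index operand is rewritten to
  // the post-indexed encoding with no offset register and a zero immediate.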
7692   case ARM::LDRT_POST:
7693   case ARM::LDRBT_POST: {
7694     const unsigned Opcode =
7695       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
7696                                            : ARM::LDRBT_POST_IMM;
7697     MCInst TmpInst;
7698     TmpInst.setOpcode(Opcode);
7699     TmpInst.addOperand(Inst.getOperand(0));
7700     TmpInst.addOperand(Inst.getOperand(1));
7701     TmpInst.addOperand(Inst.getOperand(1));
7702     TmpInst.addOperand(MCOperand::createReg(0));
7703     TmpInst.addOperand(MCOperand::createImm(0));
7704     TmpInst.addOperand(Inst.getOperand(2));
7705     TmpInst.addOperand(Inst.getOperand(3));
7706     Inst = TmpInst;
7707     return true;
7708   }
7709   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
7710   case ARM::STRT_POST:
7711   case ARM::STRBT_POST: {
7712     const unsigned Opcode =
7713       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
7714                                            : ARM::STRBT_POST_IMM;
7715     MCInst TmpInst;
7716     TmpInst.setOpcode(Opcode);
7717     TmpInst.addOperand(Inst.getOperand(1));
7718     TmpInst.addOperand(Inst.getOperand(0));
7719     TmpInst.addOperand(Inst.getOperand(1));
7720     TmpInst.addOperand(MCOperand::createReg(0));
7721     TmpInst.addOperand(MCOperand::createImm(0));
7722     TmpInst.addOperand(Inst.getOperand(2));
7723     TmpInst.addOperand(Inst.getOperand(3));
7724     Inst = TmpInst;
7725     return true;
7726   }
7727   // Alias for alternate form of 'ADR Rd, #imm' instruction.
7728   case ARM::ADDri: {
7729     if (Inst.getOperand(1).getReg() != ARM::PC ||
7730         Inst.getOperand(5).getReg() != 0 ||
7731         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
7732       return false;
7733     MCInst TmpInst;
7734     TmpInst.setOpcode(ARM::ADR);
7735     TmpInst.addOperand(Inst.getOperand(0));
7736     if (Inst.getOperand(2).isImm()) {
7737       // Immediate (mod_imm) will be in its encoded form, we must unencode it
7738       // before passing it to the ADR instruction.
7739       unsigned Enc = Inst.getOperand(2).getImm();
7740       TmpInst.addOperand(MCOperand::createImm(
7741         ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
7742     } else {
7743       // Turn PC-relative expression into absolute expression.
      // Reading the PC yields the address of the current instruction + 8,
      // so the transform to adr is biased by that amount.
7746       MCSymbol *Dot = getContext().createTempSymbol();
7747       Out.EmitLabel(Dot);
7748       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
7749       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
7750                                                      MCSymbolRefExpr::VK_None,
7751                                                      getContext());
7752       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
7753       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
7754                                                      getContext());
7755       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
7756                                                         getContext());
7757       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
7758     }
7759     TmpInst.addOperand(Inst.getOperand(3));
7760     TmpInst.addOperand(Inst.getOperand(4));
7761     Inst = TmpInst;
7762     return true;
7763   }
7764   // Aliases for alternate PC+imm syntax of LDR instructions.
7765   case ARM::t2LDRpcrel:
7766     // Select the narrow version if the immediate will fit.
7767     if (Inst.getOperand(1).getImm() > 0 &&
7768         Inst.getOperand(1).getImm() <= 0xff &&
7769         !HasWideQualifier)
7770       Inst.setOpcode(ARM::tLDRpci);
7771     else
7772       Inst.setOpcode(ARM::t2LDRpci);
7773     return true;
7774   case ARM::t2LDRBpcrel:
7775     Inst.setOpcode(ARM::t2LDRBpci);
7776     return true;
7777   case ARM::t2LDRHpcrel:
7778     Inst.setOpcode(ARM::t2LDRHpci);
7779     return true;
7780   case ARM::t2LDRSBpcrel:
7781     Inst.setOpcode(ARM::t2LDRSBpci);
7782     return true;
7783   case ARM::t2LDRSHpcrel:
7784     Inst.setOpcode(ARM::t2LDRSHpci);
7785     return true;
7786   case ARM::LDRConstPool:
7787   case ARM::tLDRConstPool:
7788   case ARM::t2LDRConstPool: {
    // The pseudo-instruction "ldr rt, =immediate" is converted to a
    // "mov rt, immediate" if the immediate is known and representable;
    // otherwise we create a constant pool entry that we load from.
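    // For example, "ldr r0, =1" can become "mov r0, #1", whereas
    // "ldr r0, =some_symbol" (or a constant with no mov/mvn encoding) falls
    // back to a load from the constant pool.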
7792     MCInst TmpInst;
7793     if (Inst.getOpcode() == ARM::LDRConstPool)
7794       TmpInst.setOpcode(ARM::LDRi12);
7795     else if (Inst.getOpcode() == ARM::tLDRConstPool)
7796       TmpInst.setOpcode(ARM::tLDRpci);
7797     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
7798       TmpInst.setOpcode(ARM::t2LDRpci);
7799     const ARMOperand &PoolOperand =
7800       (HasWideQualifier ?
7801        static_cast<ARMOperand &>(*Operands[4]) :
7802        static_cast<ARMOperand &>(*Operands[3]));
7803     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
7804     // If SubExprVal is a constant we may be able to use a MOV
7805     if (isa<MCConstantExpr>(SubExprVal) &&
7806         Inst.getOperand(0).getReg() != ARM::PC &&
7807         Inst.getOperand(0).getReg() != ARM::SP) {
7808       int64_t Value =
7809         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7810       bool UseMov  = true;
7811       bool MovHasS = true;
7812       if (Inst.getOpcode() == ARM::LDRConstPool) {
7813         // ARM Constant
7814         if (ARM_AM::getSOImmVal(Value) != -1) {
7815           Value = ARM_AM::getSOImmVal(Value);
7816           TmpInst.setOpcode(ARM::MOVi);
7817         }
7818         else if (ARM_AM::getSOImmVal(~Value) != -1) {
7819           Value = ARM_AM::getSOImmVal(~Value);
7820           TmpInst.setOpcode(ARM::MVNi);
7821         }
7822         else if (hasV6T2Ops() &&
                 Value >= 0 && Value < 65536) {
7824           TmpInst.setOpcode(ARM::MOVi16);
7825           MovHasS = false;
7826         }
7827         else
7828           UseMov = false;
7829       }
7830       else {
7831         // Thumb/Thumb2 Constant
7832         if (hasThumb2() &&
7833             ARM_AM::getT2SOImmVal(Value) != -1)
7834           TmpInst.setOpcode(ARM::t2MOVi);
7835         else if (hasThumb2() &&
7836                  ARM_AM::getT2SOImmVal(~Value) != -1) {
7837           TmpInst.setOpcode(ARM::t2MVNi);
7838           Value = ~Value;
7839         }
7840         else if (hasV8MBaseline() &&
                 Value >= 0 && Value < 65536) {
7842           TmpInst.setOpcode(ARM::t2MOVi16);
7843           MovHasS = false;
7844         }
7845         else
7846           UseMov = false;
7847       }
7848       if (UseMov) {
7849         TmpInst.addOperand(Inst.getOperand(0));           // Rt
7850         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
7851         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7852         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7853         if (MovHasS)
7854           TmpInst.addOperand(MCOperand::createReg(0));    // S
7855         Inst = TmpInst;
7856         return true;
7857       }
7858     }
    // No opportunity to use MOV/MVN; create a constant pool entry instead.
7860     const MCExpr *CPLoc =
7861       getTargetStreamer().addConstantPoolEntry(SubExprVal,
7862                                                PoolOperand.getStartLoc());
7863     TmpInst.addOperand(Inst.getOperand(0));           // Rt
7864     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7865     if (TmpInst.getOpcode() == ARM::LDRi12)
7866       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
7867     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7868     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7869     Inst = TmpInst;
7870     return true;
7871   }
7872   // Handle NEON VST complex aliases.
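  // For these aliases the parser produces a single D register plus a lane
  // index; the real instructions expect the base/writeback operands first and
  // the full register list, which is synthesized here from the first register
  // and the spacing returned by getRealVSTOpcode.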
7873   case ARM::VST1LNdWB_register_Asm_8:
7874   case ARM::VST1LNdWB_register_Asm_16:
7875   case ARM::VST1LNdWB_register_Asm_32: {
7876     MCInst TmpInst;
7877     // Shuffle the operands around so the lane index operand is in the
7878     // right place.
7879     unsigned Spacing;
7880     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7881     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7882     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7883     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7884     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7885     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7886     TmpInst.addOperand(Inst.getOperand(1)); // lane
7887     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7888     TmpInst.addOperand(Inst.getOperand(6));
7889     Inst = TmpInst;
7890     return true;
7891   }
7892 
7893   case ARM::VST2LNdWB_register_Asm_8:
7894   case ARM::VST2LNdWB_register_Asm_16:
7895   case ARM::VST2LNdWB_register_Asm_32:
7896   case ARM::VST2LNqWB_register_Asm_16:
7897   case ARM::VST2LNqWB_register_Asm_32: {
7898     MCInst TmpInst;
7899     // Shuffle the operands around so the lane index operand is in the
7900     // right place.
7901     unsigned Spacing;
7902     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7903     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7904     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7905     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7906     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7907     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7908     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7909                                             Spacing));
7910     TmpInst.addOperand(Inst.getOperand(1)); // lane
7911     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7912     TmpInst.addOperand(Inst.getOperand(6));
7913     Inst = TmpInst;
7914     return true;
7915   }
7916 
7917   case ARM::VST3LNdWB_register_Asm_8:
7918   case ARM::VST3LNdWB_register_Asm_16:
7919   case ARM::VST3LNdWB_register_Asm_32:
7920   case ARM::VST3LNqWB_register_Asm_16:
7921   case ARM::VST3LNqWB_register_Asm_32: {
7922     MCInst TmpInst;
7923     // Shuffle the operands around so the lane index operand is in the
7924     // right place.
7925     unsigned Spacing;
7926     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7927     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7928     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7929     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7930     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7931     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7932     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7933                                             Spacing));
7934     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7935                                             Spacing * 2));
7936     TmpInst.addOperand(Inst.getOperand(1)); // lane
7937     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7938     TmpInst.addOperand(Inst.getOperand(6));
7939     Inst = TmpInst;
7940     return true;
7941   }
7942 
7943   case ARM::VST4LNdWB_register_Asm_8:
7944   case ARM::VST4LNdWB_register_Asm_16:
7945   case ARM::VST4LNdWB_register_Asm_32:
7946   case ARM::VST4LNqWB_register_Asm_16:
7947   case ARM::VST4LNqWB_register_Asm_32: {
7948     MCInst TmpInst;
7949     // Shuffle the operands around so the lane index operand is in the
7950     // right place.
7951     unsigned Spacing;
7952     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7953     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7954     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7955     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7956     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7957     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7958     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7959                                             Spacing));
7960     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7961                                             Spacing * 2));
7962     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7963                                             Spacing * 3));
7964     TmpInst.addOperand(Inst.getOperand(1)); // lane
7965     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7966     TmpInst.addOperand(Inst.getOperand(6));
7967     Inst = TmpInst;
7968     return true;
7969   }
7970 
7971   case ARM::VST1LNdWB_fixed_Asm_8:
7972   case ARM::VST1LNdWB_fixed_Asm_16:
7973   case ARM::VST1LNdWB_fixed_Asm_32: {
7974     MCInst TmpInst;
7975     // Shuffle the operands around so the lane index operand is in the
7976     // right place.
7977     unsigned Spacing;
7978     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7979     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7980     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7981     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7982     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7983     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7984     TmpInst.addOperand(Inst.getOperand(1)); // lane
7985     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7986     TmpInst.addOperand(Inst.getOperand(5));
7987     Inst = TmpInst;
7988     return true;
7989   }
7990 
7991   case ARM::VST2LNdWB_fixed_Asm_8:
7992   case ARM::VST2LNdWB_fixed_Asm_16:
7993   case ARM::VST2LNdWB_fixed_Asm_32:
7994   case ARM::VST2LNqWB_fixed_Asm_16:
7995   case ARM::VST2LNqWB_fixed_Asm_32: {
7996     MCInst TmpInst;
7997     // Shuffle the operands around so the lane index operand is in the
7998     // right place.
7999     unsigned Spacing;
8000     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8001     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8002     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8003     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8004     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8005     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8006     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8007                                             Spacing));
8008     TmpInst.addOperand(Inst.getOperand(1)); // lane
8009     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8010     TmpInst.addOperand(Inst.getOperand(5));
8011     Inst = TmpInst;
8012     return true;
8013   }
8014 
8015   case ARM::VST3LNdWB_fixed_Asm_8:
8016   case ARM::VST3LNdWB_fixed_Asm_16:
8017   case ARM::VST3LNdWB_fixed_Asm_32:
8018   case ARM::VST3LNqWB_fixed_Asm_16:
8019   case ARM::VST3LNqWB_fixed_Asm_32: {
8020     MCInst TmpInst;
8021     // Shuffle the operands around so the lane index operand is in the
8022     // right place.
8023     unsigned Spacing;
8024     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8025     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8026     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8027     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8028     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8029     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8030     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8031                                             Spacing));
8032     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8033                                             Spacing * 2));
8034     TmpInst.addOperand(Inst.getOperand(1)); // lane
8035     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8036     TmpInst.addOperand(Inst.getOperand(5));
8037     Inst = TmpInst;
8038     return true;
8039   }
8040 
8041   case ARM::VST4LNdWB_fixed_Asm_8:
8042   case ARM::VST4LNdWB_fixed_Asm_16:
8043   case ARM::VST4LNdWB_fixed_Asm_32:
8044   case ARM::VST4LNqWB_fixed_Asm_16:
8045   case ARM::VST4LNqWB_fixed_Asm_32: {
8046     MCInst TmpInst;
8047     // Shuffle the operands around so the lane index operand is in the
8048     // right place.
8049     unsigned Spacing;
8050     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8051     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8052     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8053     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8054     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8055     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8056     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8057                                             Spacing));
8058     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8059                                             Spacing * 2));
8060     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8061                                             Spacing * 3));
8062     TmpInst.addOperand(Inst.getOperand(1)); // lane
8063     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8064     TmpInst.addOperand(Inst.getOperand(5));
8065     Inst = TmpInst;
8066     return true;
8067   }
8068 
8069   case ARM::VST1LNdAsm_8:
8070   case ARM::VST1LNdAsm_16:
8071   case ARM::VST1LNdAsm_32: {
8072     MCInst TmpInst;
8073     // Shuffle the operands around so the lane index operand is in the
8074     // right place.
8075     unsigned Spacing;
8076     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8077     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8078     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8079     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8080     TmpInst.addOperand(Inst.getOperand(1)); // lane
8081     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8082     TmpInst.addOperand(Inst.getOperand(5));
8083     Inst = TmpInst;
8084     return true;
8085   }
8086 
8087   case ARM::VST2LNdAsm_8:
8088   case ARM::VST2LNdAsm_16:
8089   case ARM::VST2LNdAsm_32:
8090   case ARM::VST2LNqAsm_16:
8091   case ARM::VST2LNqAsm_32: {
8092     MCInst TmpInst;
8093     // Shuffle the operands around so the lane index operand is in the
8094     // right place.
8095     unsigned Spacing;
8096     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8097     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8098     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8099     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8100     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8101                                             Spacing));
8102     TmpInst.addOperand(Inst.getOperand(1)); // lane
8103     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8104     TmpInst.addOperand(Inst.getOperand(5));
8105     Inst = TmpInst;
8106     return true;
8107   }
8108 
8109   case ARM::VST3LNdAsm_8:
8110   case ARM::VST3LNdAsm_16:
8111   case ARM::VST3LNdAsm_32:
8112   case ARM::VST3LNqAsm_16:
8113   case ARM::VST3LNqAsm_32: {
8114     MCInst TmpInst;
8115     // Shuffle the operands around so the lane index operand is in the
8116     // right place.
8117     unsigned Spacing;
8118     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8119     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8120     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8121     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8122     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8123                                             Spacing));
8124     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8125                                             Spacing * 2));
8126     TmpInst.addOperand(Inst.getOperand(1)); // lane
8127     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8128     TmpInst.addOperand(Inst.getOperand(5));
8129     Inst = TmpInst;
8130     return true;
8131   }
8132 
8133   case ARM::VST4LNdAsm_8:
8134   case ARM::VST4LNdAsm_16:
8135   case ARM::VST4LNdAsm_32:
8136   case ARM::VST4LNqAsm_16:
8137   case ARM::VST4LNqAsm_32: {
8138     MCInst TmpInst;
8139     // Shuffle the operands around so the lane index operand is in the
8140     // right place.
8141     unsigned Spacing;
8142     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8143     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8144     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8145     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8146     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8147                                             Spacing));
8148     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8149                                             Spacing * 2));
8150     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8151                                             Spacing * 3));
8152     TmpInst.addOperand(Inst.getOperand(1)); // lane
8153     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8154     TmpInst.addOperand(Inst.getOperand(5));
8155     Inst = TmpInst;
8156     return true;
8157   }
8158 
8159   // Handle NEON VLD complex aliases.
8160   case ARM::VLD1LNdWB_register_Asm_8:
8161   case ARM::VLD1LNdWB_register_Asm_16:
8162   case ARM::VLD1LNdWB_register_Asm_32: {
8163     MCInst TmpInst;
8164     // Shuffle the operands around so the lane index operand is in the
8165     // right place.
8166     unsigned Spacing;
8167     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8168     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8169     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8170     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8171     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8172     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8173     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8174     TmpInst.addOperand(Inst.getOperand(1)); // lane
8175     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8176     TmpInst.addOperand(Inst.getOperand(6));
8177     Inst = TmpInst;
8178     return true;
8179   }
8180 
8181   case ARM::VLD2LNdWB_register_Asm_8:
8182   case ARM::VLD2LNdWB_register_Asm_16:
8183   case ARM::VLD2LNdWB_register_Asm_32:
8184   case ARM::VLD2LNqWB_register_Asm_16:
8185   case ARM::VLD2LNqWB_register_Asm_32: {
8186     MCInst TmpInst;
8187     // Shuffle the operands around so the lane index operand is in the
8188     // right place.
8189     unsigned Spacing;
8190     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8191     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8192     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8193                                             Spacing));
8194     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8195     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8196     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8197     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8198     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8199     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8200                                             Spacing));
8201     TmpInst.addOperand(Inst.getOperand(1)); // lane
8202     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8203     TmpInst.addOperand(Inst.getOperand(6));
8204     Inst = TmpInst;
8205     return true;
8206   }
8207 
8208   case ARM::VLD3LNdWB_register_Asm_8:
8209   case ARM::VLD3LNdWB_register_Asm_16:
8210   case ARM::VLD3LNdWB_register_Asm_32:
8211   case ARM::VLD3LNqWB_register_Asm_16:
8212   case ARM::VLD3LNqWB_register_Asm_32: {
8213     MCInst TmpInst;
8214     // Shuffle the operands around so the lane index operand is in the
8215     // right place.
8216     unsigned Spacing;
8217     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8218     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8219     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8220                                             Spacing));
8221     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8222                                             Spacing * 2));
8223     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8224     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8225     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8226     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8227     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8228     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8229                                             Spacing));
8230     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8231                                             Spacing * 2));
8232     TmpInst.addOperand(Inst.getOperand(1)); // lane
8233     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8234     TmpInst.addOperand(Inst.getOperand(6));
8235     Inst = TmpInst;
8236     return true;
8237   }
8238 
8239   case ARM::VLD4LNdWB_register_Asm_8:
8240   case ARM::VLD4LNdWB_register_Asm_16:
8241   case ARM::VLD4LNdWB_register_Asm_32:
8242   case ARM::VLD4LNqWB_register_Asm_16:
8243   case ARM::VLD4LNqWB_register_Asm_32: {
8244     MCInst TmpInst;
8245     // Shuffle the operands around so the lane index operand is in the
8246     // right place.
8247     unsigned Spacing;
8248     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8249     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8250     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8251                                             Spacing));
8252     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8253                                             Spacing * 2));
8254     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8255                                             Spacing * 3));
8256     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8257     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8258     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8259     TmpInst.addOperand(Inst.getOperand(4)); // Rm
8260     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8261     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8262                                             Spacing));
8263     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8264                                             Spacing * 2));
8265     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8266                                             Spacing * 3));
8267     TmpInst.addOperand(Inst.getOperand(1)); // lane
8268     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
8269     TmpInst.addOperand(Inst.getOperand(6));
8270     Inst = TmpInst;
8271     return true;
8272   }
8273 
8274   case ARM::VLD1LNdWB_fixed_Asm_8:
8275   case ARM::VLD1LNdWB_fixed_Asm_16:
8276   case ARM::VLD1LNdWB_fixed_Asm_32: {
8277     MCInst TmpInst;
8278     // Shuffle the operands around so the lane index operand is in the
8279     // right place.
8280     unsigned Spacing;
8281     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8282     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8283     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8284     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8285     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8286     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8287     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8288     TmpInst.addOperand(Inst.getOperand(1)); // lane
8289     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8290     TmpInst.addOperand(Inst.getOperand(5));
8291     Inst = TmpInst;
8292     return true;
8293   }
8294 
8295   case ARM::VLD2LNdWB_fixed_Asm_8:
8296   case ARM::VLD2LNdWB_fixed_Asm_16:
8297   case ARM::VLD2LNdWB_fixed_Asm_32:
8298   case ARM::VLD2LNqWB_fixed_Asm_16:
8299   case ARM::VLD2LNqWB_fixed_Asm_32: {
8300     MCInst TmpInst;
8301     // Shuffle the operands around so the lane index operand is in the
8302     // right place.
8303     unsigned Spacing;
8304     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8305     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8306     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8307                                             Spacing));
8308     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8309     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8310     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8311     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8312     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8313     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8314                                             Spacing));
8315     TmpInst.addOperand(Inst.getOperand(1)); // lane
8316     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8317     TmpInst.addOperand(Inst.getOperand(5));
8318     Inst = TmpInst;
8319     return true;
8320   }
8321 
8322   case ARM::VLD3LNdWB_fixed_Asm_8:
8323   case ARM::VLD3LNdWB_fixed_Asm_16:
8324   case ARM::VLD3LNdWB_fixed_Asm_32:
8325   case ARM::VLD3LNqWB_fixed_Asm_16:
8326   case ARM::VLD3LNqWB_fixed_Asm_32: {
8327     MCInst TmpInst;
8328     // Shuffle the operands around so the lane index operand is in the
8329     // right place.
8330     unsigned Spacing;
8331     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8332     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8333     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8334                                             Spacing));
8335     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8336                                             Spacing * 2));
8337     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8338     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8339     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8340     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8341     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8342     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8343                                             Spacing));
8344     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8345                                             Spacing * 2));
8346     TmpInst.addOperand(Inst.getOperand(1)); // lane
8347     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8348     TmpInst.addOperand(Inst.getOperand(5));
8349     Inst = TmpInst;
8350     return true;
8351   }
8352 
8353   case ARM::VLD4LNdWB_fixed_Asm_8:
8354   case ARM::VLD4LNdWB_fixed_Asm_16:
8355   case ARM::VLD4LNdWB_fixed_Asm_32:
8356   case ARM::VLD4LNqWB_fixed_Asm_16:
8357   case ARM::VLD4LNqWB_fixed_Asm_32: {
8358     MCInst TmpInst;
8359     // Shuffle the operands around so the lane index operand is in the
8360     // right place.
8361     unsigned Spacing;
8362     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8363     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8364     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8365                                             Spacing));
8366     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8367                                             Spacing * 2));
8368     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8369                                             Spacing * 3));
8370     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
8371     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8372     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8373     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8374     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8375     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8376                                             Spacing));
8377     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8378                                             Spacing * 2));
8379     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8380                                             Spacing * 3));
8381     TmpInst.addOperand(Inst.getOperand(1)); // lane
8382     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8383     TmpInst.addOperand(Inst.getOperand(5));
8384     Inst = TmpInst;
8385     return true;
8386   }
8387 
8388   case ARM::VLD1LNdAsm_8:
8389   case ARM::VLD1LNdAsm_16:
8390   case ARM::VLD1LNdAsm_32: {
8391     MCInst TmpInst;
8392     // Shuffle the operands around so the lane index operand is in the
8393     // right place.
8394     unsigned Spacing;
8395     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8396     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8397     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8398     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8399     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8400     TmpInst.addOperand(Inst.getOperand(1)); // lane
8401     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8402     TmpInst.addOperand(Inst.getOperand(5));
8403     Inst = TmpInst;
8404     return true;
8405   }
8406 
8407   case ARM::VLD2LNdAsm_8:
8408   case ARM::VLD2LNdAsm_16:
8409   case ARM::VLD2LNdAsm_32:
8410   case ARM::VLD2LNqAsm_16:
8411   case ARM::VLD2LNqAsm_32: {
8412     MCInst TmpInst;
8413     // Shuffle the operands around so the lane index operand is in the
8414     // right place.
8415     unsigned Spacing;
8416     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8417     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8418     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8419                                             Spacing));
8420     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8421     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8422     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8423     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8424                                             Spacing));
8425     TmpInst.addOperand(Inst.getOperand(1)); // lane
8426     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8427     TmpInst.addOperand(Inst.getOperand(5));
8428     Inst = TmpInst;
8429     return true;
8430   }
8431 
8432   case ARM::VLD3LNdAsm_8:
8433   case ARM::VLD3LNdAsm_16:
8434   case ARM::VLD3LNdAsm_32:
8435   case ARM::VLD3LNqAsm_16:
8436   case ARM::VLD3LNqAsm_32: {
8437     MCInst TmpInst;
8438     // Shuffle the operands around so the lane index operand is in the
8439     // right place.
8440     unsigned Spacing;
8441     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8442     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8443     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8444                                             Spacing));
8445     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8446                                             Spacing * 2));
8447     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8448     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8449     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8450     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8451                                             Spacing));
8452     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8453                                             Spacing * 2));
8454     TmpInst.addOperand(Inst.getOperand(1)); // lane
8455     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8456     TmpInst.addOperand(Inst.getOperand(5));
8457     Inst = TmpInst;
8458     return true;
8459   }
8460 
8461   case ARM::VLD4LNdAsm_8:
8462   case ARM::VLD4LNdAsm_16:
8463   case ARM::VLD4LNdAsm_32:
8464   case ARM::VLD4LNqAsm_16:
8465   case ARM::VLD4LNqAsm_32: {
8466     MCInst TmpInst;
8467     // Shuffle the operands around so the lane index operand is in the
8468     // right place.
8469     unsigned Spacing;
8470     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8471     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8472     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8473                                             Spacing));
8474     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8475                                             Spacing * 2));
8476     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8477                                             Spacing * 3));
8478     TmpInst.addOperand(Inst.getOperand(2)); // Rn
8479     TmpInst.addOperand(Inst.getOperand(3)); // alignment
8480     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
8481     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8482                                             Spacing));
8483     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8484                                             Spacing * 2));
8485     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8486                                             Spacing * 3));
8487     TmpInst.addOperand(Inst.getOperand(1)); // lane
8488     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8489     TmpInst.addOperand(Inst.getOperand(5));
8490     Inst = TmpInst;
8491     return true;
8492   }
8493 
8494   // VLD3DUP single 3-element structure to all lanes instructions.
8495   case ARM::VLD3DUPdAsm_8:
8496   case ARM::VLD3DUPdAsm_16:
8497   case ARM::VLD3DUPdAsm_32:
8498   case ARM::VLD3DUPqAsm_8:
8499   case ARM::VLD3DUPqAsm_16:
8500   case ARM::VLD3DUPqAsm_32: {
8501     MCInst TmpInst;
8502     unsigned Spacing;
8503     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8504     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8505     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8506                                             Spacing));
8507     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8508                                             Spacing * 2));
8509     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8510     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8511     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8512     TmpInst.addOperand(Inst.getOperand(4));
8513     Inst = TmpInst;
8514     return true;
8515   }
8516 
8517   case ARM::VLD3DUPdWB_fixed_Asm_8:
8518   case ARM::VLD3DUPdWB_fixed_Asm_16:
8519   case ARM::VLD3DUPdWB_fixed_Asm_32:
8520   case ARM::VLD3DUPqWB_fixed_Asm_8:
8521   case ARM::VLD3DUPqWB_fixed_Asm_16:
8522   case ARM::VLD3DUPqWB_fixed_Asm_32: {
8523     MCInst TmpInst;
8524     unsigned Spacing;
8525     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8526     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8527     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8528                                             Spacing));
8529     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8530                                             Spacing * 2));
8531     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8532     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8533     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8534     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8535     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8536     TmpInst.addOperand(Inst.getOperand(4));
8537     Inst = TmpInst;
8538     return true;
8539   }
8540 
8541   case ARM::VLD3DUPdWB_register_Asm_8:
8542   case ARM::VLD3DUPdWB_register_Asm_16:
8543   case ARM::VLD3DUPdWB_register_Asm_32:
8544   case ARM::VLD3DUPqWB_register_Asm_8:
8545   case ARM::VLD3DUPqWB_register_Asm_16:
8546   case ARM::VLD3DUPqWB_register_Asm_32: {
8547     MCInst TmpInst;
8548     unsigned Spacing;
8549     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8550     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8551     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8552                                             Spacing));
8553     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8554                                             Spacing * 2));
8555     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8556     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8557     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8558     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8559     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8560     TmpInst.addOperand(Inst.getOperand(5));
8561     Inst = TmpInst;
8562     return true;
8563   }
8564 
8565   // VLD3 multiple 3-element structure instructions.
8566   case ARM::VLD3dAsm_8:
8567   case ARM::VLD3dAsm_16:
8568   case ARM::VLD3dAsm_32:
8569   case ARM::VLD3qAsm_8:
8570   case ARM::VLD3qAsm_16:
8571   case ARM::VLD3qAsm_32: {
8572     MCInst TmpInst;
8573     unsigned Spacing;
8574     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8575     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8576     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8577                                             Spacing));
8578     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8579                                             Spacing * 2));
8580     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8581     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8582     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8583     TmpInst.addOperand(Inst.getOperand(4));
8584     Inst = TmpInst;
8585     return true;
8586   }
8587 
8588   case ARM::VLD3dWB_fixed_Asm_8:
8589   case ARM::VLD3dWB_fixed_Asm_16:
8590   case ARM::VLD3dWB_fixed_Asm_32:
8591   case ARM::VLD3qWB_fixed_Asm_8:
8592   case ARM::VLD3qWB_fixed_Asm_16:
8593   case ARM::VLD3qWB_fixed_Asm_32: {
8594     MCInst TmpInst;
8595     unsigned Spacing;
8596     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8597     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8598     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8599                                             Spacing));
8600     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8601                                             Spacing * 2));
8602     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8603     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8604     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8605     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8606     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8607     TmpInst.addOperand(Inst.getOperand(4));
8608     Inst = TmpInst;
8609     return true;
8610   }
8611 
8612   case ARM::VLD3dWB_register_Asm_8:
8613   case ARM::VLD3dWB_register_Asm_16:
8614   case ARM::VLD3dWB_register_Asm_32:
8615   case ARM::VLD3qWB_register_Asm_8:
8616   case ARM::VLD3qWB_register_Asm_16:
8617   case ARM::VLD3qWB_register_Asm_32: {
8618     MCInst TmpInst;
8619     unsigned Spacing;
8620     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8621     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8622     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8623                                             Spacing));
8624     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8625                                             Spacing * 2));
8626     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8627     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8628     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8629     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8630     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8631     TmpInst.addOperand(Inst.getOperand(5));
8632     Inst = TmpInst;
8633     return true;
8634   }
8635 
8636   // VLD4DUP single 4-element structure to all lanes instructions.
8637   case ARM::VLD4DUPdAsm_8:
8638   case ARM::VLD4DUPdAsm_16:
8639   case ARM::VLD4DUPdAsm_32:
8640   case ARM::VLD4DUPqAsm_8:
8641   case ARM::VLD4DUPqAsm_16:
8642   case ARM::VLD4DUPqAsm_32: {
8643     MCInst TmpInst;
8644     unsigned Spacing;
8645     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8646     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8647     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8648                                             Spacing));
8649     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8650                                             Spacing * 2));
8651     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8652                                             Spacing * 3));
8653     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8654     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8655     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8656     TmpInst.addOperand(Inst.getOperand(4));
8657     Inst = TmpInst;
8658     return true;
8659   }
8660 
8661   case ARM::VLD4DUPdWB_fixed_Asm_8:
8662   case ARM::VLD4DUPdWB_fixed_Asm_16:
8663   case ARM::VLD4DUPdWB_fixed_Asm_32:
8664   case ARM::VLD4DUPqWB_fixed_Asm_8:
8665   case ARM::VLD4DUPqWB_fixed_Asm_16:
8666   case ARM::VLD4DUPqWB_fixed_Asm_32: {
8667     MCInst TmpInst;
8668     unsigned Spacing;
8669     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8670     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8671     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8672                                             Spacing));
8673     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8674                                             Spacing * 2));
8675     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8676                                             Spacing * 3));
8677     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8678     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8679     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8680     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8681     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8682     TmpInst.addOperand(Inst.getOperand(4));
8683     Inst = TmpInst;
8684     return true;
8685   }
8686 
8687   case ARM::VLD4DUPdWB_register_Asm_8:
8688   case ARM::VLD4DUPdWB_register_Asm_16:
8689   case ARM::VLD4DUPdWB_register_Asm_32:
8690   case ARM::VLD4DUPqWB_register_Asm_8:
8691   case ARM::VLD4DUPqWB_register_Asm_16:
8692   case ARM::VLD4DUPqWB_register_Asm_32: {
8693     MCInst TmpInst;
8694     unsigned Spacing;
8695     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8696     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8697     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8698                                             Spacing));
8699     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8700                                             Spacing * 2));
8701     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8702                                             Spacing * 3));
8703     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8704     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8705     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8706     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8707     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8708     TmpInst.addOperand(Inst.getOperand(5));
8709     Inst = TmpInst;
8710     return true;
8711   }
8712 
8713   // VLD4 multiple 4-element structure instructions.
8714   case ARM::VLD4dAsm_8:
8715   case ARM::VLD4dAsm_16:
8716   case ARM::VLD4dAsm_32:
8717   case ARM::VLD4qAsm_8:
8718   case ARM::VLD4qAsm_16:
8719   case ARM::VLD4qAsm_32: {
8720     MCInst TmpInst;
8721     unsigned Spacing;
8722     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8723     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8724     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8725                                             Spacing));
8726     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8727                                             Spacing * 2));
8728     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8729                                             Spacing * 3));
8730     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8731     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8732     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8733     TmpInst.addOperand(Inst.getOperand(4));
8734     Inst = TmpInst;
8735     return true;
8736   }
8737 
8738   case ARM::VLD4dWB_fixed_Asm_8:
8739   case ARM::VLD4dWB_fixed_Asm_16:
8740   case ARM::VLD4dWB_fixed_Asm_32:
8741   case ARM::VLD4qWB_fixed_Asm_8:
8742   case ARM::VLD4qWB_fixed_Asm_16:
8743   case ARM::VLD4qWB_fixed_Asm_32: {
8744     MCInst TmpInst;
8745     unsigned Spacing;
8746     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8747     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8748     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8749                                             Spacing));
8750     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8751                                             Spacing * 2));
8752     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8753                                             Spacing * 3));
8754     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8755     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8756     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8757     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8758     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8759     TmpInst.addOperand(Inst.getOperand(4));
8760     Inst = TmpInst;
8761     return true;
8762   }
8763 
8764   case ARM::VLD4dWB_register_Asm_8:
8765   case ARM::VLD4dWB_register_Asm_16:
8766   case ARM::VLD4dWB_register_Asm_32:
8767   case ARM::VLD4qWB_register_Asm_8:
8768   case ARM::VLD4qWB_register_Asm_16:
8769   case ARM::VLD4qWB_register_Asm_32: {
8770     MCInst TmpInst;
8771     unsigned Spacing;
8772     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8773     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8774     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8775                                             Spacing));
8776     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8777                                             Spacing * 2));
8778     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8779                                             Spacing * 3));
8780     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8781     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8782     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8783     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8784     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8785     TmpInst.addOperand(Inst.getOperand(5));
8786     Inst = TmpInst;
8787     return true;
8788   }
8789 
8790   // VST3 multiple 3-element structure instructions.
8791   case ARM::VST3dAsm_8:
8792   case ARM::VST3dAsm_16:
8793   case ARM::VST3dAsm_32:
8794   case ARM::VST3qAsm_8:
8795   case ARM::VST3qAsm_16:
8796   case ARM::VST3qAsm_32: {
8797     MCInst TmpInst;
8798     unsigned Spacing;
8799     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8800     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8801     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8802     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8803     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8804                                             Spacing));
8805     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8806                                             Spacing * 2));
8807     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8808     TmpInst.addOperand(Inst.getOperand(4));
8809     Inst = TmpInst;
8810     return true;
8811   }
8812 
8813   case ARM::VST3dWB_fixed_Asm_8:
8814   case ARM::VST3dWB_fixed_Asm_16:
8815   case ARM::VST3dWB_fixed_Asm_32:
8816   case ARM::VST3qWB_fixed_Asm_8:
8817   case ARM::VST3qWB_fixed_Asm_16:
8818   case ARM::VST3qWB_fixed_Asm_32: {
8819     MCInst TmpInst;
8820     unsigned Spacing;
8821     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8822     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8823     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8824     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8825     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8826     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8827     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8828                                             Spacing));
8829     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8830                                             Spacing * 2));
8831     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8832     TmpInst.addOperand(Inst.getOperand(4));
8833     Inst = TmpInst;
8834     return true;
8835   }
8836 
8837   case ARM::VST3dWB_register_Asm_8:
8838   case ARM::VST3dWB_register_Asm_16:
8839   case ARM::VST3dWB_register_Asm_32:
8840   case ARM::VST3qWB_register_Asm_8:
8841   case ARM::VST3qWB_register_Asm_16:
8842   case ARM::VST3qWB_register_Asm_32: {
8843     MCInst TmpInst;
8844     unsigned Spacing;
8845     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8846     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8847     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8848     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8849     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8850     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8851     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8852                                             Spacing));
8853     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8854                                             Spacing * 2));
8855     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8856     TmpInst.addOperand(Inst.getOperand(5));
8857     Inst = TmpInst;
8858     return true;
8859   }
8860 
8861   // VST4 multiple 4-element structure instructions.
8862   case ARM::VST4dAsm_8:
8863   case ARM::VST4dAsm_16:
8864   case ARM::VST4dAsm_32:
8865   case ARM::VST4qAsm_8:
8866   case ARM::VST4qAsm_16:
8867   case ARM::VST4qAsm_32: {
8868     MCInst TmpInst;
8869     unsigned Spacing;
8870     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8871     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8872     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8873     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8874     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8875                                             Spacing));
8876     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8877                                             Spacing * 2));
8878     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8879                                             Spacing * 3));
8880     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8881     TmpInst.addOperand(Inst.getOperand(4));
8882     Inst = TmpInst;
8883     return true;
8884   }
8885 
8886   case ARM::VST4dWB_fixed_Asm_8:
8887   case ARM::VST4dWB_fixed_Asm_16:
8888   case ARM::VST4dWB_fixed_Asm_32:
8889   case ARM::VST4qWB_fixed_Asm_8:
8890   case ARM::VST4qWB_fixed_Asm_16:
8891   case ARM::VST4qWB_fixed_Asm_32: {
8892     MCInst TmpInst;
8893     unsigned Spacing;
8894     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8895     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8896     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8897     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8898     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8899     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8900     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8901                                             Spacing));
8902     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8903                                             Spacing * 2));
8904     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8905                                             Spacing * 3));
8906     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8907     TmpInst.addOperand(Inst.getOperand(4));
8908     Inst = TmpInst;
8909     return true;
8910   }
8911 
8912   case ARM::VST4dWB_register_Asm_8:
8913   case ARM::VST4dWB_register_Asm_16:
8914   case ARM::VST4dWB_register_Asm_32:
8915   case ARM::VST4qWB_register_Asm_8:
8916   case ARM::VST4qWB_register_Asm_16:
8917   case ARM::VST4qWB_register_Asm_32: {
8918     MCInst TmpInst;
8919     unsigned Spacing;
8920     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8921     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8922     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8923     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8924     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8925     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8926     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8927                                             Spacing));
8928     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8929                                             Spacing * 2));
8930     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8931                                             Spacing * 3));
8932     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8933     TmpInst.addOperand(Inst.getOperand(5));
8934     Inst = TmpInst;
8935     return true;
8936   }
8937 
8938   // Handle encoding choice for the shift-immediate instructions.
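  // For instance, a flag-setting shift like "lsls r0, r1, #2" outside an IT
  // block that was matched as t2LSLri can be narrowed to the 16-bit tLSLri
  // (operands here are purely illustrative).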
8939   case ARM::t2LSLri:
8940   case ARM::t2LSRri:
8941   case ARM::t2ASRri:
8942     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8943         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8944         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8945         !HasWideQualifier) {
8946       unsigned NewOpc;
8947       switch (Inst.getOpcode()) {
8948       default: llvm_unreachable("unexpected opcode");
8949       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
8950       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
8951       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
8952       }
8953       // The Thumb1 operands aren't in the same order. Awesome, eh?
8954       MCInst TmpInst;
8955       TmpInst.setOpcode(NewOpc);
8956       TmpInst.addOperand(Inst.getOperand(0));
8957       TmpInst.addOperand(Inst.getOperand(5));
8958       TmpInst.addOperand(Inst.getOperand(1));
8959       TmpInst.addOperand(Inst.getOperand(2));
8960       TmpInst.addOperand(Inst.getOperand(3));
8961       TmpInst.addOperand(Inst.getOperand(4));
8962       Inst = TmpInst;
8963       return true;
8964     }
8965     return false;
8966 
8967   // Handle the Thumb2 mode MOV complex aliases.
8968   case ARM::t2MOVsr:
8969   case ARM::t2MOVSsr: {
8970     // If the register operands are all low registers, which instruction
8971     // to expand to depends on the CCOut operand and on whether we're in
8972     // an IT block.
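    // For example (illustrative operands), "movs r0, r0, lsl r1" outside an
    // IT block can be emitted as the 16-bit "lsls r0, r1".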
8973     bool isNarrow = false;
8974     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8975         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8976         isARMLowRegister(Inst.getOperand(2).getReg()) &&
8977         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8978         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
8979         !HasWideQualifier)
8980       isNarrow = true;
8981     MCInst TmpInst;
8982     unsigned newOpc;
8983     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
8984     default: llvm_unreachable("unexpected opcode!");
8985     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
8986     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
8987     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
8988     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
8989     }
8990     TmpInst.setOpcode(newOpc);
8991     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8992     if (isNarrow)
8993       TmpInst.addOperand(MCOperand::createReg(
8994           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8995     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8996     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8997     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8998     TmpInst.addOperand(Inst.getOperand(5));
8999     if (!isNarrow)
9000       TmpInst.addOperand(MCOperand::createReg(
9001           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
9002     Inst = TmpInst;
9003     return true;
9004   }
9005   case ARM::t2MOVsi:
9006   case ARM::t2MOVSsi: {
9007     // If the register operands are all low registers, which instruction
9008     // to expand to depends on the CCOut operand and on whether we're in
9009     // an IT block.
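    // For example (illustrative operands), "movs r0, r1, lsl #2" outside an
    // IT block can be emitted as the 16-bit "lsls r0, r1, #2".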
9010     bool isNarrow = false;
9011     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9012         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9013         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
9014         !HasWideQualifier)
9015       isNarrow = true;
9016     MCInst TmpInst;
9017     unsigned newOpc;
9018     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
9019     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
9020     bool isMov = false;
9021     // MOV rd, rm, LSL #0 is actually a MOV instruction
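    // (for example, "mov r0, r1, lsl #0" assembles to a plain "mov r0, r1").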
9022     if (Shift == ARM_AM::lsl && Amount == 0) {
9023       isMov = true;
9024       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
9025       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
9026       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
9027       // instead.
9028       if (inITBlock()) {
9029         isNarrow = false;
9030       }
9031       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
9032     } else {
9033       switch(Shift) {
9034       default: llvm_unreachable("unexpected opcode!");
9035       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
9036       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
9037       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
9038       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
9039       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
9040       }
9041     }
9042     if (Amount == 32) Amount = 0;
9043     TmpInst.setOpcode(newOpc);
9044     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9045     if (isNarrow && !isMov)
9046       TmpInst.addOperand(MCOperand::createReg(
9047           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
9048     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9049     if (newOpc != ARM::t2RRX && !isMov)
9050       TmpInst.addOperand(MCOperand::createImm(Amount));
9051     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9052     TmpInst.addOperand(Inst.getOperand(4));
9053     if (!isNarrow)
9054       TmpInst.addOperand(MCOperand::createReg(
9055           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
9056     Inst = TmpInst;
9057     return true;
9058   }
9059   // Handle the ARM mode MOV complex aliases.
9060   case ARM::ASRr:
9061   case ARM::LSRr:
9062   case ARM::LSLr:
9063   case ARM::RORr: {
9064     ARM_AM::ShiftOpc ShiftTy;
9065     switch(Inst.getOpcode()) {
9066     default: llvm_unreachable("unexpected opcode!");
9067     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
9068     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
9069     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
9070     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
9071     }
9072     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
9073     MCInst TmpInst;
9074     TmpInst.setOpcode(ARM::MOVsr);
9075     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9076     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9077     TmpInst.addOperand(Inst.getOperand(2)); // Rm
9078     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9079     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9080     TmpInst.addOperand(Inst.getOperand(4));
9081     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
9082     Inst = TmpInst;
9083     return true;
9084   }
9085   case ARM::ASRi:
9086   case ARM::LSRi:
9087   case ARM::LSLi:
9088   case ARM::RORi: {
9089     ARM_AM::ShiftOpc ShiftTy;
9090     switch(Inst.getOpcode()) {
9091     default: llvm_unreachable("unexpected opcode!");
9092     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
9093     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
9094     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
9095     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
9096     }
9097     // A shift by zero is a plain MOVr, not a MOVsi.
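    // (for example, "lsl r0, r1, #0" is emitted as "mov r0, r1").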
9098     unsigned Amt = Inst.getOperand(2).getImm();
9099     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
9100     // A shift by 32 should be encoded as 0 when permitted
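    // (e.g. "lsr r0, r1, #32" and "asr r0, r1, #32" use a shift field of 0).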
9101     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
9102       Amt = 0;
9103     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
9104     MCInst TmpInst;
9105     TmpInst.setOpcode(Opc);
9106     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9107     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9108     if (Opc == ARM::MOVsi)
9109       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9110     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
9111     TmpInst.addOperand(Inst.getOperand(4));
9112     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
9113     Inst = TmpInst;
9114     return true;
9115   }
9116   case ARM::RRXi: {
9117     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
9118     MCInst TmpInst;
9119     TmpInst.setOpcode(ARM::MOVsi);
9120     TmpInst.addOperand(Inst.getOperand(0)); // Rd
9121     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9122     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
9123     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9124     TmpInst.addOperand(Inst.getOperand(3));
9125     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
9126     Inst = TmpInst;
9127     return true;
9128   }
9129   case ARM::t2LDMIA_UPD: {
9130     // If this is a load of a single register, then we should use
9131     // a post-indexed LDR instruction instead, per the ARM ARM.
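    // For example, a Thumb2 "ldmia r1!, {r2}" is emitted as the equivalent
    // post-indexed "ldr r2, [r1], #4".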
9132     if (Inst.getNumOperands() != 5)
9133       return false;
9134     MCInst TmpInst;
9135     TmpInst.setOpcode(ARM::t2LDR_POST);
9136     TmpInst.addOperand(Inst.getOperand(4)); // Rt
9137     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9138     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9139     TmpInst.addOperand(MCOperand::createImm(4));
9140     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9141     TmpInst.addOperand(Inst.getOperand(3));
9142     Inst = TmpInst;
9143     return true;
9144   }
9145   case ARM::t2STMDB_UPD: {
9146     // If this is a store of a single register, then we should use
9147     // a pre-indexed STR instruction instead, per the ARM ARM.
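    // For example, a Thumb2 "stmdb r1!, {r2}" is emitted as the equivalent
    // pre-indexed "str r2, [r1, #-4]!".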
9148     if (Inst.getNumOperands() != 5)
9149       return false;
9150     MCInst TmpInst;
9151     TmpInst.setOpcode(ARM::t2STR_PRE);
9152     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9153     TmpInst.addOperand(Inst.getOperand(4)); // Rt
9154     TmpInst.addOperand(Inst.getOperand(1)); // Rn
9155     TmpInst.addOperand(MCOperand::createImm(-4));
9156     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9157     TmpInst.addOperand(Inst.getOperand(3));
9158     Inst = TmpInst;
9159     return true;
9160   }
9161   case ARM::LDMIA_UPD:
9162     // If this is a load of a single register via a 'pop', then we should use
9163     // a post-indexed LDR instruction instead, per the ARM ARM.
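    // For example, "pop {r3}" is emitted as "ldr r3, [sp], #4".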
9164     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
9165         Inst.getNumOperands() == 5) {
9166       MCInst TmpInst;
9167       TmpInst.setOpcode(ARM::LDR_POST_IMM);
9168       TmpInst.addOperand(Inst.getOperand(4)); // Rt
9169       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9170       TmpInst.addOperand(Inst.getOperand(1)); // Rn
9171       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
9172       TmpInst.addOperand(MCOperand::createImm(4));
9173       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9174       TmpInst.addOperand(Inst.getOperand(3));
9175       Inst = TmpInst;
9176       return true;
9177     }
9178     break;
9179   case ARM::STMDB_UPD:
9180     // If this is a store of a single register via a 'push', then we should use
9181     // a pre-indexed STR instruction instead, per the ARM ARM.
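    // For example, "push {r3}" is emitted as "str r3, [sp, #-4]!".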
9182     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
9183         Inst.getNumOperands() == 5) {
9184       MCInst TmpInst;
9185       TmpInst.setOpcode(ARM::STR_PRE_IMM);
9186       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
9187       TmpInst.addOperand(Inst.getOperand(4)); // Rt
9188       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
9189       TmpInst.addOperand(MCOperand::createImm(-4));
9190       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
9191       TmpInst.addOperand(Inst.getOperand(3));
9192       Inst = TmpInst;
9193     }
9194     break;
9195   case ARM::t2ADDri12:
9196     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
9197     // mnemonic was used (not "addw"), encoding T3 is preferred.
9198     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
9199         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
9200       break;
9201     Inst.setOpcode(ARM::t2ADDri);
9202     Inst.addOperand(MCOperand::createReg(0)); // cc_out
9203     break;
9204   case ARM::t2SUBri12:
9205     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
9206     // mnemonic was used (not "subw"), encoding T3 is preferred.
9207     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
9208         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
9209       break;
9210     Inst.setOpcode(ARM::t2SUBri);
9211     Inst.addOperand(MCOperand::createReg(0)); // cc_out
9212     break;
9213   case ARM::tADDi8:
9214     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
9215     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
9216     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
9217     // to encoding T1 if <Rd> is omitted."
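    // For example (illustrative operands), "adds r1, r1, #4" specifies <Rd>
    // and selects T1 (tADDi3), while "adds r1, #4" keeps T2 (tADDi8).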
9218     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
9219       Inst.setOpcode(ARM::tADDi3);
9220       return true;
9221     }
9222     break;
9223   case ARM::tSUBi8:
9224     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
9225     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
9226     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
9227     // to encoding T1 if <Rd> is omitted."
9228     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
9229       Inst.setOpcode(ARM::tSUBi3);
9230       return true;
9231     }
9232     break;
9233   case ARM::t2ADDri:
9234   case ARM::t2SUBri: {
9235     // If the destination and first source operand are the same, and
9236     // the flags are compatible with the current IT status, use encoding T2
9237     // instead of T3, for compatibility with the system 'as'. Make sure the
9238     // wide encoding wasn't explicitly requested.
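    // For example (illustrative operands), an "adds r1, r1, #200" outside an
    // IT block that was matched as t2ADDri is narrowed to the 16-bit tADDi8.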
9239     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
9240         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
9241         (Inst.getOperand(2).isImm() &&
9242          (unsigned)Inst.getOperand(2).getImm() > 255) ||
9243         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
9244         HasWideQualifier)
9245       break;
9246     MCInst TmpInst;
9247     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
9248                       ARM::tADDi8 : ARM::tSUBi8);
9249     TmpInst.addOperand(Inst.getOperand(0));
9250     TmpInst.addOperand(Inst.getOperand(5));
9251     TmpInst.addOperand(Inst.getOperand(0));
9252     TmpInst.addOperand(Inst.getOperand(2));
9253     TmpInst.addOperand(Inst.getOperand(3));
9254     TmpInst.addOperand(Inst.getOperand(4));
9255     Inst = TmpInst;
9256     return true;
9257   }
9258   case ARM::t2ADDrr: {
9259     // If the destination and first source operand are the same, and
9260     // there's no setting of the flags, use encoding T2 instead of T3.
9261     // Note that this is only for ADD, not SUB. This mirrors the system
9262     // 'as' behaviour.  Also take advantage of ADD being commutative.
9263     // Make sure the wide encoding wasn't explicit.
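    // E.g. "add r0, r0, r8", or the commuted "add r0, r8, r0", narrows to
    // tADDhirr.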
9264     bool Swap = false;
9265     auto DestReg = Inst.getOperand(0).getReg();
9266     bool Transform = DestReg == Inst.getOperand(1).getReg();
9267     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
9268       Transform = true;
9269       Swap = true;
9270     }
9271     if (!Transform ||
9272         Inst.getOperand(5).getReg() != 0 ||
9273         HasWideQualifier)
9274       break;
9275     MCInst TmpInst;
9276     TmpInst.setOpcode(ARM::tADDhirr);
9277     TmpInst.addOperand(Inst.getOperand(0));
9278     TmpInst.addOperand(Inst.getOperand(0));
9279     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
9280     TmpInst.addOperand(Inst.getOperand(3));
9281     TmpInst.addOperand(Inst.getOperand(4));
9282     Inst = TmpInst;
9283     return true;
9284   }
9285   case ARM::tADDrSP:
9286     // If the non-SP source operand and the destination operand are not the
9287     // same, we need to use the 32-bit encoding if it's available.
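    // E.g. "add r1, sp, r2" needs t2ADDrr, while "add r1, sp, r1" can stay
    // as the 16-bit tADDrSP.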
9288     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
9289       Inst.setOpcode(ARM::t2ADDrr);
9290       Inst.addOperand(MCOperand::createReg(0)); // cc_out
9291       return true;
9292     }
9293     break;
9294   case ARM::tB:
9295     // A Thumb conditional branch outside of an IT block is a tBcc.
9296     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
9297       Inst.setOpcode(ARM::tBcc);
9298       return true;
9299     }
9300     break;
9301   case ARM::t2B:
9302     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
9304       Inst.setOpcode(ARM::t2Bcc);
9305       return true;
9306     }
9307     break;
9308   case ARM::t2Bcc:
9309     // If the conditional is AL or we're in an IT block, we really want t2B.
9310     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
9311       Inst.setOpcode(ARM::t2B);
9312       return true;
9313     }
9314     break;
9315   case ARM::tBcc:
9316     // If the conditional is AL, we really want tB.
9317     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
9318       Inst.setOpcode(ARM::tB);
9319       return true;
9320     }
9321     break;
9322   case ARM::tLDMIA: {
9323     // If the register list contains any high registers, or if the writeback
9324     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
9325     // instead if we're in Thumb2. Otherwise, this should have generated
9326     // an error in validateInstruction().
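    // E.g. "ldm r2, {r0, r8}" (high register) or "ldm r2, {r0, r1}" (no
    // writeback with the base not in the list) must use t2LDMIA.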
9327     unsigned Rn = Inst.getOperand(0).getReg();
9328     bool hasWritebackToken =
9329         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
9330          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
9331     bool listContainsBase;
9332     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
9333         (!listContainsBase && !hasWritebackToken) ||
9334         (listContainsBase && hasWritebackToken)) {
9335       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
9336       assert(isThumbTwo());
9337       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
9338       // If we're switching to the updating version, we need to insert
9339       // the writeback tied operand.
9340       if (hasWritebackToken)
9341         Inst.insert(Inst.begin(),
9342                     MCOperand::createReg(Inst.getOperand(0).getReg()));
9343       return true;
9344     }
9345     break;
9346   }
9347   case ARM::tSTMIA_UPD: {
9348     // If the register list contains any high registers, we need to use
9349     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
9350     // should have generated an error in validateInstruction().
9351     unsigned Rn = Inst.getOperand(0).getReg();
9352     bool listContainsBase;
9353     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
9354       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
9355       assert(isThumbTwo());
9356       Inst.setOpcode(ARM::t2STMIA_UPD);
9357       return true;
9358     }
9359     break;
9360   }
9361   case ARM::tPOP: {
9362     bool listContainsBase;
9363     // If the register list contains any high registers, we need to use
9364     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
9365     // should have generated an error in validateInstruction().
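    // E.g. "pop {r4, r8}" becomes "t2LDMIA_UPD sp!, {r4, r8}".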
9366     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
9367       return false;
9368     assert(isThumbTwo());
9369     Inst.setOpcode(ARM::t2LDMIA_UPD);
9370     // Add the base register and writeback operands.
9371     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9372     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9373     return true;
9374   }
9375   case ARM::tPUSH: {
9376     bool listContainsBase;
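    // As for tPOP: if the list has registers above r7 (other than LR), e.g.
    // "push {r4, r8}", switch to the 32-bit t2STMDB_UPD form.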
9377     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
9378       return false;
9379     assert(isThumbTwo());
9380     Inst.setOpcode(ARM::t2STMDB_UPD);
9381     // Add the base register and writeback operands.
9382     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9383     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
9384     return true;
9385   }
9386   case ARM::t2MOVi:
9387     // If we can use the 16-bit encoding and the user didn't explicitly
9388     // request the 32-bit variant, transform it here.
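    // E.g. "movs r0, #42" outside an IT block (or "moveq r0, #42" inside
    // one) narrows to tMOVi8.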
9389     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9390         (Inst.getOperand(1).isImm() &&
9391          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
9392         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9393         !HasWideQualifier) {
9394       // The operands aren't in the same order for tMOVi8...
9395       MCInst TmpInst;
9396       TmpInst.setOpcode(ARM::tMOVi8);
9397       TmpInst.addOperand(Inst.getOperand(0));
9398       TmpInst.addOperand(Inst.getOperand(4));
9399       TmpInst.addOperand(Inst.getOperand(1));
9400       TmpInst.addOperand(Inst.getOperand(2));
9401       TmpInst.addOperand(Inst.getOperand(3));
9402       Inst = TmpInst;
9403       return true;
9404     }
9405     break;
9406 
9407   case ARM::t2MOVr:
9408     // If we can use the 16-bit encoding and the user didn't explicitly
9409     // request the 32-bit variant, transform it here.
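    // E.g. an unconditional, flag-setting "movs r0, r1" narrows to tMOVSr.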
9410     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9411         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9412         Inst.getOperand(2).getImm() == ARMCC::AL &&
9413         Inst.getOperand(4).getReg() == ARM::CPSR &&
9414         !HasWideQualifier) {
9415       // The operands aren't the same for tMOV[S]r... (no cc_out)
9416       MCInst TmpInst;
9417       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
9418       TmpInst.addOperand(Inst.getOperand(0));
9419       TmpInst.addOperand(Inst.getOperand(1));
9420       TmpInst.addOperand(Inst.getOperand(2));
9421       TmpInst.addOperand(Inst.getOperand(3));
9422       Inst = TmpInst;
9423       return true;
9424     }
9425     break;
9426 
9427   case ARM::t2SXTH:
9428   case ARM::t2SXTB:
9429   case ARM::t2UXTH:
9430   case ARM::t2UXTB:
9431     // If we can use the 16-bit encoding and the user didn't explicitly
9432     // request the 32-bit variant, transform it here.
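    // E.g. "sxth r0, r1" (no rotation) narrows to the 16-bit tSXTH.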
9433     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
9434         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9435         Inst.getOperand(2).getImm() == 0 &&
9436         !HasWideQualifier) {
9437       unsigned NewOpc;
9438       switch (Inst.getOpcode()) {
9439       default: llvm_unreachable("Illegal opcode!");
9440       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
9441       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
9442       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
9443       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
9444       }
9445       // The operands aren't the same for thumb1 (no rotate operand).
9446       MCInst TmpInst;
9447       TmpInst.setOpcode(NewOpc);
9448       TmpInst.addOperand(Inst.getOperand(0));
9449       TmpInst.addOperand(Inst.getOperand(1));
9450       TmpInst.addOperand(Inst.getOperand(3));
9451       TmpInst.addOperand(Inst.getOperand(4));
9452       Inst = TmpInst;
9453       return true;
9454     }
9455     break;
9456 
9457   case ARM::MOVsi: {
9458     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
    // rrx shifts and asr/lsr of #32 are encoded as 0
9460     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
9461       return false;
9462     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
9463       // Shifting by zero is accepted as a vanilla 'MOVr'
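      // E.g. "mov r0, r1, lsl #0" is emitted as a plain "mov r0, r1".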
9464       MCInst TmpInst;
9465       TmpInst.setOpcode(ARM::MOVr);
9466       TmpInst.addOperand(Inst.getOperand(0));
9467       TmpInst.addOperand(Inst.getOperand(1));
9468       TmpInst.addOperand(Inst.getOperand(3));
9469       TmpInst.addOperand(Inst.getOperand(4));
9470       TmpInst.addOperand(Inst.getOperand(5));
9471       Inst = TmpInst;
9472       return true;
9473     }
9474     return false;
9475   }
9476   case ARM::ANDrsi:
9477   case ARM::ORRrsi:
9478   case ARM::EORrsi:
9479   case ARM::BICrsi:
9480   case ARM::SUBrsi:
9481   case ARM::ADDrsi: {
9482     unsigned newOpc;
9483     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
9484     if (SOpc == ARM_AM::rrx) return false;
9485     switch (Inst.getOpcode()) {
9486     default: llvm_unreachable("unexpected opcode!");
9487     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
9488     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
9489     case ARM::EORrsi: newOpc = ARM::EORrr; break;
9490     case ARM::BICrsi: newOpc = ARM::BICrr; break;
9491     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
9492     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
9493     }
9494     // If the shift is by zero, use the non-shifted instruction definition.
9495     // The exception is for right shifts, where 0 == 32
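    // E.g. "and r0, r1, r2, lsl #0" becomes ANDrr, but "lsr #0" means a
    // shift of 32 and is left alone.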
9496     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
9497         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
9498       MCInst TmpInst;
9499       TmpInst.setOpcode(newOpc);
9500       TmpInst.addOperand(Inst.getOperand(0));
9501       TmpInst.addOperand(Inst.getOperand(1));
9502       TmpInst.addOperand(Inst.getOperand(2));
9503       TmpInst.addOperand(Inst.getOperand(4));
9504       TmpInst.addOperand(Inst.getOperand(5));
9505       TmpInst.addOperand(Inst.getOperand(6));
9506       Inst = TmpInst;
9507       return true;
9508     }
9509     return false;
9510   }
9511   case ARM::ITasm:
9512   case ARM::t2IT: {
9513     // Set up the IT block state according to the IT instruction we just
9514     // matched.
9515     assert(!inITBlock() && "nested IT blocks?!");
9516     startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()),
9517                          Inst.getOperand(1).getImm());
9518     break;
9519   }
9520   case ARM::t2LSLrr:
9521   case ARM::t2LSRrr:
9522   case ARM::t2ASRrr:
9523   case ARM::t2SBCrr:
9524   case ARM::t2RORrr:
9525   case ARM::t2BICrr:
    // Assemblers should use the narrow encodings of these instructions when
    // permissible.
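    // E.g. "lsls r0, r0, r1" outside an IT block narrows to tLSLrr.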
9527     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
9528          isARMLowRegister(Inst.getOperand(2).getReg())) &&
9529         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
9530         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9531         !HasWideQualifier) {
9532       unsigned NewOpc;
9533       switch (Inst.getOpcode()) {
9534         default: llvm_unreachable("unexpected opcode");
9535         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
9536         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
9537         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
9538         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
9539         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
9540         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
9541       }
9542       MCInst TmpInst;
9543       TmpInst.setOpcode(NewOpc);
9544       TmpInst.addOperand(Inst.getOperand(0));
9545       TmpInst.addOperand(Inst.getOperand(5));
9546       TmpInst.addOperand(Inst.getOperand(1));
9547       TmpInst.addOperand(Inst.getOperand(2));
9548       TmpInst.addOperand(Inst.getOperand(3));
9549       TmpInst.addOperand(Inst.getOperand(4));
9550       Inst = TmpInst;
9551       return true;
9552     }
9553     return false;
9554 
9555   case ARM::t2ANDrr:
9556   case ARM::t2EORrr:
9557   case ARM::t2ADCrr:
9558   case ARM::t2ORRrr:
    // Assemblers should use the narrow encodings of these instructions when
    // permissible. These instructions are special in that they are
    // commutable, so shorter encodings are available more often.
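    // E.g. both "ands r0, r0, r1" and "ands r0, r1, r0" (outside an IT
    // block) narrow to tAND.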
9562     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
9563          isARMLowRegister(Inst.getOperand(2).getReg())) &&
9564         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
9565          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
9566         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9567         !HasWideQualifier) {
9568       unsigned NewOpc;
9569       switch (Inst.getOpcode()) {
9570         default: llvm_unreachable("unexpected opcode");
9571         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
9572         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
9573         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
9574         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
9575       }
9576       MCInst TmpInst;
9577       TmpInst.setOpcode(NewOpc);
9578       TmpInst.addOperand(Inst.getOperand(0));
9579       TmpInst.addOperand(Inst.getOperand(5));
9580       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
9581         TmpInst.addOperand(Inst.getOperand(1));
9582         TmpInst.addOperand(Inst.getOperand(2));
9583       } else {
9584         TmpInst.addOperand(Inst.getOperand(2));
9585         TmpInst.addOperand(Inst.getOperand(1));
9586       }
9587       TmpInst.addOperand(Inst.getOperand(3));
9588       TmpInst.addOperand(Inst.getOperand(4));
9589       Inst = TmpInst;
9590       return true;
9591     }
9592     return false;
9593   case ARM::t2VPST:
9594   case ARM::t2VPTv16i8:
9595   case ARM::t2VPTv8i16:
9596   case ARM::t2VPTv4i32:
9597   case ARM::t2VPTv16u8:
9598   case ARM::t2VPTv8u16:
9599   case ARM::t2VPTv4u32:
9600   case ARM::t2VPTv16s8:
9601   case ARM::t2VPTv8s16:
9602   case ARM::t2VPTv4s32:
9603   case ARM::t2VPTv4f32:
9604   case ARM::t2VPTv8f16:
9605   case ARM::t2VPTv16i8r:
9606   case ARM::t2VPTv8i16r:
9607   case ARM::t2VPTv4i32r:
9608   case ARM::t2VPTv16u8r:
9609   case ARM::t2VPTv8u16r:
9610   case ARM::t2VPTv4u32r:
9611   case ARM::t2VPTv16s8r:
9612   case ARM::t2VPTv8s16r:
9613   case ARM::t2VPTv4s32r:
9614   case ARM::t2VPTv4f32r:
9615   case ARM::t2VPTv8f16r: {
9616     assert(!inVPTBlock() && "Nested VPT blocks are not allowed");
9617     MCOperand &MO = Inst.getOperand(0);
9618     VPTState.Mask = MO.getImm();
9619     VPTState.CurPosition = 0;
9620     break;
9621   }
9622   }
9623   return false;
9624 }
9625 
9626 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
9627   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
9628   // suffix depending on whether they're in an IT block or not.
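  // E.g. in Thumb2 the narrow flag-setting form is rejected inside an IT
  // block, and the non-flag-setting form is only accepted inside one.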
9629   unsigned Opc = Inst.getOpcode();
9630   const MCInstrDesc &MCID = MII.get(Opc);
9631   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
9632     assert(MCID.hasOptionalDef() &&
9633            "optionally flag setting instruction missing optional def operand");
9634     assert(MCID.NumOperands == Inst.getNumOperands() &&
9635            "operand count mismatch!");
9636     // Find the optional-def operand (cc_out).
9637     unsigned OpNo;
9638     for (OpNo = 0;
         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
9640          ++OpNo)
9641       ;
9642     // If we're parsing Thumb1, reject it completely.
9643     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
9644       return Match_RequiresFlagSetting;
9645     // If we're parsing Thumb2, which form is legal depends on whether we're
9646     // in an IT block.
9647     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
9648         !inITBlock())
9649       return Match_RequiresITBlock;
9650     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
9651         inITBlock())
9652       return Match_RequiresNotITBlock;
9653     // LSL with zero immediate is not allowed in an IT block
9654     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
9655       return Match_RequiresNotITBlock;
9656   } else if (isThumbOne()) {
9657     // Some high-register supporting Thumb1 encodings only allow both registers
9658     // to be from r0-r7 when in Thumb2.
9659     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
9660         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9661         isARMLowRegister(Inst.getOperand(2).getReg()))
9662       return Match_RequiresThumb2;
9663     // Others only require ARMv6 or later.
9664     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
9665              isARMLowRegister(Inst.getOperand(0).getReg()) &&
9666              isARMLowRegister(Inst.getOperand(1).getReg()))
9667       return Match_RequiresV6;
9668   }
9669 
9670   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
9671   // than the loop below can handle, so it uses the GPRnopc register class and
9672   // we do SP handling here.
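  // E.g. before ARMv8, "mov sp, sp" and a flag-setting "movs sp, r0" are
  // both rejected here.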
9673   if (Opc == ARM::t2MOVr && !hasV8Ops())
9674   {
9675     // SP as both source and destination is not allowed
9676     if (Inst.getOperand(0).getReg() == ARM::SP &&
9677         Inst.getOperand(1).getReg() == ARM::SP)
9678       return Match_RequiresV8;
9679     // When flags-setting SP as either source or destination is not allowed
9680     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
9681         (Inst.getOperand(0).getReg() == ARM::SP ||
9682          Inst.getOperand(1).getReg() == ARM::SP))
9683       return Match_RequiresV8;
9684   }
9685 
9686   switch (Inst.getOpcode()) {
9687   case ARM::VMRS:
9688   case ARM::VMSR:
9689   case ARM::VMRS_FPCXTS:
9690   case ARM::VMRS_FPCXTNS:
9691   case ARM::VMSR_FPCXTS:
9692   case ARM::VMSR_FPCXTNS:
9693   case ARM::VMRS_FPSCR_NZCVQC:
9694   case ARM::VMSR_FPSCR_NZCVQC:
9695   case ARM::FMSTAT:
9696   case ARM::VMRS_VPR:
9697   case ARM::VMRS_P0:
9698   case ARM::VMSR_VPR:
9699   case ARM::VMSR_P0:
9700     // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
9701     // ARMv8-A.
9702     if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP &&
9703         (isThumb() && !hasV8Ops()))
9704       return Match_InvalidOperand;
9705     break;
9706   default:
9707     break;
9708   }
9709 
9710   for (unsigned I = 0; I < MCID.NumOperands; ++I)
9711     if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
      // rGPRRegClass excludes PC, and also excludes SP before ARMv8
9713       if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
9714         return Match_RequiresV8;
9715       else if (Inst.getOperand(I).getReg() == ARM::PC)
9716         return Match_InvalidOperand;
9717     }
9718 
9719   return Match_Success;
9720 }
9721 
9722 namespace llvm {
9723 
9724 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
9725   return true; // In an assembly source, no need to second-guess
9726 }
9727 
9728 } // end namespace llvm
9729 
// Returns true if Inst is unpredictable if it is in an IT block, but is not
9731 // the last instruction in the block.
9732 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
9733   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9734 
9735   // All branch & call instructions terminate IT blocks with the exception of
9736   // SVC.
9737   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
9738       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
9739     return true;
9740 
9741   // Any arithmetic instruction which writes to the PC also terminates the IT
9742   // block.
9743   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
9744     return true;
9745 
9746   return false;
9747 }
9748 
9749 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
9750                                           SmallVectorImpl<NearMissInfo> &NearMisses,
9751                                           bool MatchingInlineAsm,
9752                                           bool &EmitInITBlock,
9753                                           MCStreamer &Out) {
9754   // If we can't use an implicit IT block here, just match as normal.
9755   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
9756     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
9757 
9758   // Try to match the instruction in an extension of the current IT block (if
9759   // there is one).
9760   if (inImplicitITBlock()) {
9761     extendImplicitITBlock(ITState.Cond);
9762     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
9763             Match_Success) {
      // The match succeeded, but we still have to check that the instruction
      // is valid in this implicit IT block.
9766       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9767       if (MCID.isPredicable()) {
9768         ARMCC::CondCodes InstCond =
9769             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9770                 .getImm();
9771         ARMCC::CondCodes ITCond = currentITCond();
9772         if (InstCond == ITCond) {
9773           EmitInITBlock = true;
9774           return Match_Success;
9775         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
9776           invertCurrentITCondition();
9777           EmitInITBlock = true;
9778           return Match_Success;
9779         }
9780       }
9781     }
9782     rewindImplicitITPosition();
9783   }
9784 
9785   // Finish the current IT block, and try to match outside any IT block.
9786   flushPendingInstructions(Out);
9787   unsigned PlainMatchResult =
9788       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
9789   if (PlainMatchResult == Match_Success) {
9790     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9791     if (MCID.isPredicable()) {
9792       ARMCC::CondCodes InstCond =
9793           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9794               .getImm();
9795       // Some forms of the branch instruction have their own condition code
9796       // fields, so can be conditionally executed without an IT block.
9797       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
9798         EmitInITBlock = false;
9799         return Match_Success;
9800       }
9801       if (InstCond == ARMCC::AL) {
9802         EmitInITBlock = false;
9803         return Match_Success;
9804       }
9805     } else {
9806       EmitInITBlock = false;
9807       return Match_Success;
9808     }
9809   }
9810 
9811   // Try to match in a new IT block. The matcher doesn't check the actual
9812   // condition, so we create an IT block with a dummy condition, and fix it up
9813   // once we know the actual condition.
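  // E.g. a lone "moveq r0, #1" in Thumb2 can be accepted this way by
  // synthesising an implicit "it eq" in front of it.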
9814   startImplicitITBlock();
9815   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
9816       Match_Success) {
9817     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9818     if (MCID.isPredicable()) {
9819       ITState.Cond =
9820           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9821               .getImm();
9822       EmitInITBlock = true;
9823       return Match_Success;
9824     }
9825   }
9826   discardImplicitITBlock();
9827 
9828   // If none of these succeed, return the error we got when trying to match
9829   // outside any IT blocks.
9830   EmitInITBlock = false;
9831   return PlainMatchResult;
9832 }
9833 
9834 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
9835                                          unsigned VariantID = 0);
9836 
9837 static const char *getSubtargetFeatureName(uint64_t Val);
9838 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
9839                                            OperandVector &Operands,
9840                                            MCStreamer &Out, uint64_t &ErrorInfo,
9841                                            bool MatchingInlineAsm) {
9842   MCInst Inst;
9843   unsigned MatchResult;
9844   bool PendConditionalInstruction = false;
9845 
9846   SmallVector<NearMissInfo, 4> NearMisses;
9847   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
9848                                  PendConditionalInstruction, Out);
9849 
9850   switch (MatchResult) {
9851   case Match_Success:
9852     LLVM_DEBUG(dbgs() << "Parsed as: ";
9853                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
9854                dbgs() << "\n");
9855 
9856     // Context sensitive operand constraints aren't handled by the matcher,
9857     // so check them here.
9858     if (validateInstruction(Inst, Operands)) {
9859       // Still progress the IT block, otherwise one wrong condition causes
9860       // nasty cascading errors.
9861       forwardITPosition();
9862       forwardVPTPosition();
9863       return true;
9864     }
9865 
9866     { // processInstruction() updates inITBlock state, we need to save it away
9867       bool wasInITBlock = inITBlock();
9868 
9869       // Some instructions need post-processing to, for example, tweak which
9870       // encoding is selected. Loop on it while changes happen so the
9871       // individual transformations can chain off each other. E.g.,
9872       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
9873       while (processInstruction(Inst, Operands, Out))
9874         LLVM_DEBUG(dbgs() << "Changed to: ";
9875                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
9876                    dbgs() << "\n");
9877 
9878       // Only after the instruction is fully processed, we can validate it
9879       if (wasInITBlock && hasV8Ops() && isThumb() &&
9880           !isV8EligibleForIT(&Inst)) {
9881         Warning(IDLoc, "deprecated instruction in IT block");
9882       }
9883     }
9884 
9885     // Only move forward at the very end so that everything in validate
9886     // and process gets a consistent answer about whether we're in an IT
9887     // block.
9888     forwardITPosition();
9889     forwardVPTPosition();
9890 
    // ITasm is an ARM mode pseudo-instruction that just sets the IT block and
9892     // doesn't actually encode.
9893     if (Inst.getOpcode() == ARM::ITasm)
9894       return false;
9895 
9896     Inst.setLoc(IDLoc);
9897     if (PendConditionalInstruction) {
9898       PendingConditionalInsts.push_back(Inst);
9899       if (isITBlockFull() || isITBlockTerminator(Inst))
9900         flushPendingInstructions(Out);
9901     } else {
9902       Out.EmitInstruction(Inst, getSTI());
9903     }
9904     return false;
9905   case Match_NearMisses:
9906     ReportNearMisses(NearMisses, IDLoc, Operands);
9907     return true;
9908   case Match_MnemonicFail: {
9909     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
9910     std::string Suggestion = ARMMnemonicSpellCheck(
9911       ((ARMOperand &)*Operands[0]).getToken(), FBS);
9912     return Error(IDLoc, "invalid instruction" + Suggestion,
9913                  ((ARMOperand &)*Operands[0]).getLocRange());
9914   }
9915   }
9916 
9917   llvm_unreachable("Implement any new match types added!");
9918 }
9919 
/// ParseDirective parses the ARM-specific directives
9921 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
9922   const MCObjectFileInfo::Environment Format =
9923     getContext().getObjectFileInfo()->getObjectFileType();
9924   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9925   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
9926 
9927   StringRef IDVal = DirectiveID.getIdentifier();
9928   if (IDVal == ".word")
9929     parseLiteralValues(4, DirectiveID.getLoc());
9930   else if (IDVal == ".short" || IDVal == ".hword")
9931     parseLiteralValues(2, DirectiveID.getLoc());
9932   else if (IDVal == ".thumb")
9933     parseDirectiveThumb(DirectiveID.getLoc());
9934   else if (IDVal == ".arm")
9935     parseDirectiveARM(DirectiveID.getLoc());
9936   else if (IDVal == ".thumb_func")
9937     parseDirectiveThumbFunc(DirectiveID.getLoc());
9938   else if (IDVal == ".code")
9939     parseDirectiveCode(DirectiveID.getLoc());
9940   else if (IDVal == ".syntax")
9941     parseDirectiveSyntax(DirectiveID.getLoc());
9942   else if (IDVal == ".unreq")
9943     parseDirectiveUnreq(DirectiveID.getLoc());
9944   else if (IDVal == ".fnend")
9945     parseDirectiveFnEnd(DirectiveID.getLoc());
9946   else if (IDVal == ".cantunwind")
9947     parseDirectiveCantUnwind(DirectiveID.getLoc());
9948   else if (IDVal == ".personality")
9949     parseDirectivePersonality(DirectiveID.getLoc());
9950   else if (IDVal == ".handlerdata")
9951     parseDirectiveHandlerData(DirectiveID.getLoc());
9952   else if (IDVal == ".setfp")
9953     parseDirectiveSetFP(DirectiveID.getLoc());
9954   else if (IDVal == ".pad")
9955     parseDirectivePad(DirectiveID.getLoc());
9956   else if (IDVal == ".save")
9957     parseDirectiveRegSave(DirectiveID.getLoc(), false);
9958   else if (IDVal == ".vsave")
9959     parseDirectiveRegSave(DirectiveID.getLoc(), true);
9960   else if (IDVal == ".ltorg" || IDVal == ".pool")
9961     parseDirectiveLtorg(DirectiveID.getLoc());
9962   else if (IDVal == ".even")
9963     parseDirectiveEven(DirectiveID.getLoc());
9964   else if (IDVal == ".personalityindex")
9965     parseDirectivePersonalityIndex(DirectiveID.getLoc());
9966   else if (IDVal == ".unwind_raw")
9967     parseDirectiveUnwindRaw(DirectiveID.getLoc());
9968   else if (IDVal == ".movsp")
9969     parseDirectiveMovSP(DirectiveID.getLoc());
9970   else if (IDVal == ".arch_extension")
9971     parseDirectiveArchExtension(DirectiveID.getLoc());
9972   else if (IDVal == ".align")
9973     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
9974   else if (IDVal == ".thumb_set")
9975     parseDirectiveThumbSet(DirectiveID.getLoc());
9976   else if (IDVal == ".inst")
9977     parseDirectiveInst(DirectiveID.getLoc());
9978   else if (IDVal == ".inst.n")
9979     parseDirectiveInst(DirectiveID.getLoc(), 'n');
9980   else if (IDVal == ".inst.w")
9981     parseDirectiveInst(DirectiveID.getLoc(), 'w');
9982   else if (!IsMachO && !IsCOFF) {
9983     if (IDVal == ".arch")
9984       parseDirectiveArch(DirectiveID.getLoc());
9985     else if (IDVal == ".cpu")
9986       parseDirectiveCPU(DirectiveID.getLoc());
9987     else if (IDVal == ".eabi_attribute")
9988       parseDirectiveEabiAttr(DirectiveID.getLoc());
9989     else if (IDVal == ".fpu")
9990       parseDirectiveFPU(DirectiveID.getLoc());
9991     else if (IDVal == ".fnstart")
9992       parseDirectiveFnStart(DirectiveID.getLoc());
9993     else if (IDVal == ".object_arch")
9994       parseDirectiveObjectArch(DirectiveID.getLoc());
9995     else if (IDVal == ".tlsdescseq")
9996       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
9997     else
9998       return true;
9999   } else
10000     return true;
10001   return false;
10002 }
10003 
10004 /// parseLiteralValues
10005 ///  ::= .hword expression [, expression]*
10006 ///  ::= .short expression [, expression]*
10007 ///  ::= .word expression [, expression]*
10008 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
10009   auto parseOne = [&]() -> bool {
10010     const MCExpr *Value;
10011     if (getParser().parseExpression(Value))
10012       return true;
10013     getParser().getStreamer().EmitValue(Value, Size, L);
10014     return false;
10015   };
10016   return (parseMany(parseOne));
10017 }
10018 
10019 /// parseDirectiveThumb
10020 ///  ::= .thumb
10021 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
10022   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
10023       check(!hasThumb(), L, "target does not support Thumb mode"))
10024     return true;
10025 
10026   if (!isThumb())
10027     SwitchMode();
10028 
10029   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
10030   return false;
10031 }
10032 
10033 /// parseDirectiveARM
10034 ///  ::= .arm
10035 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
10036   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
10037       check(!hasARM(), L, "target does not support ARM mode"))
10038     return true;
10039 
10040   if (isThumb())
10041     SwitchMode();
10042   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
10043   return false;
10044 }
10045 
10046 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) {
10047   // We need to flush the current implicit IT block on a label, because it is
10048   // not legal to branch into an IT block.
10049   flushPendingInstructions(getStreamer());
10050 }
10051 
10052 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
10053   if (NextSymbolIsThumb) {
10054     getParser().getStreamer().EmitThumbFunc(Symbol);
10055     NextSymbolIsThumb = false;
10056   }
10057 }
10058 
10059 /// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
10061 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
10062   MCAsmParser &Parser = getParser();
10063   const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
10064   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
10065 
  // Darwin asm has an optional function name after the .thumb_func directive;
  // ELF doesn't.
10068 
10069   if (IsMachO) {
10070     if (Parser.getTok().is(AsmToken::Identifier) ||
10071         Parser.getTok().is(AsmToken::String)) {
10072       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
10073           Parser.getTok().getIdentifier());
10074       getParser().getStreamer().EmitThumbFunc(Func);
10075       Parser.Lex();
10076       if (parseToken(AsmToken::EndOfStatement,
10077                      "unexpected token in '.thumb_func' directive"))
10078         return true;
10079       return false;
10080     }
10081   }
10082 
10083   if (parseToken(AsmToken::EndOfStatement,
10084                  "unexpected token in '.thumb_func' directive"))
10085     return true;
10086 
10087   NextSymbolIsThumb = true;
10088   return false;
10089 }
10090 
10091 /// parseDirectiveSyntax
10092 ///  ::= .syntax unified | divided
10093 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
10094   MCAsmParser &Parser = getParser();
10095   const AsmToken &Tok = Parser.getTok();
10096   if (Tok.isNot(AsmToken::Identifier)) {
10097     Error(L, "unexpected token in .syntax directive");
10098     return false;
10099   }
10100 
10101   StringRef Mode = Tok.getString();
10102   Parser.Lex();
10103   if (check(Mode == "divided" || Mode == "DIVIDED", L,
10104             "'.syntax divided' arm assembly not supported") ||
10105       check(Mode != "unified" && Mode != "UNIFIED", L,
10106             "unrecognized syntax mode in .syntax directive") ||
10107       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10108     return true;
10109 
10110   // TODO tell the MC streamer the mode
10111   // getParser().getStreamer().Emit???();
10112   return false;
10113 }
10114 
10115 /// parseDirectiveCode
10116 ///  ::= .code 16 | 32
10117 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
10118   MCAsmParser &Parser = getParser();
10119   const AsmToken &Tok = Parser.getTok();
10120   if (Tok.isNot(AsmToken::Integer))
10121     return Error(L, "unexpected token in .code directive");
10122   int64_t Val = Parser.getTok().getIntVal();
10123   if (Val != 16 && Val != 32) {
10124     Error(L, "invalid operand to .code directive");
10125     return false;
10126   }
10127   Parser.Lex();
10128 
10129   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10130     return true;
10131 
10132   if (Val == 16) {
10133     if (!hasThumb())
10134       return Error(L, "target does not support Thumb mode");
10135 
10136     if (!isThumb())
10137       SwitchMode();
10138     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
10139   } else {
10140     if (!hasARM())
10141       return Error(L, "target does not support ARM mode");
10142 
10143     if (isThumb())
10144       SwitchMode();
10145     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
10146   }
10147 
10148   return false;
10149 }
10150 
10151 /// parseDirectiveReq
10152 ///  ::= name .req registername
10153 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
10154   MCAsmParser &Parser = getParser();
10155   Parser.Lex(); // Eat the '.req' token.
10156   unsigned Reg;
10157   SMLoc SRegLoc, ERegLoc;
10158   if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
10159             "register name expected") ||
10160       parseToken(AsmToken::EndOfStatement,
10161                  "unexpected input in .req directive."))
10162     return true;
10163 
10164   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
10165     return Error(SRegLoc,
10166                  "redefinition of '" + Name + "' does not match original.");
10167 
10168   return false;
10169 }
10170 
/// parseDirectiveUnreq
10172 ///  ::= .unreq registername
10173 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
10174   MCAsmParser &Parser = getParser();
10175   if (Parser.getTok().isNot(AsmToken::Identifier))
10176     return Error(L, "unexpected input in .unreq directive.");
10177   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
10178   Parser.Lex(); // Eat the identifier.
10179   if (parseToken(AsmToken::EndOfStatement,
10180                  "unexpected input in '.unreq' directive"))
10181     return true;
10182   return false;
10183 }
10184 
10185 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
10186 // before, if supported by the new target, or emit mapping symbols for the mode
10187 // switch.
10188 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
10189   if (WasThumb != isThumb()) {
10190     if (WasThumb && hasThumb()) {
10191       // Stay in Thumb mode
10192       SwitchMode();
10193     } else if (!WasThumb && hasARM()) {
10194       // Stay in ARM mode
10195       SwitchMode();
10196     } else {
10197       // Mode switch forced, because the new arch doesn't support the old mode.
10198       getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
10199                                                             : MCAF_Code32);
      // Warn about the implicit mode switch. GAS does not switch modes here,
10201       // but instead stays in the old mode, reporting an error on any following
10202       // instructions as the mode does not exist on the target.
10203       Warning(Loc, Twine("new target does not support ") +
10204                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
10205                        (!WasThumb ? "thumb" : "arm") + " mode");
10206     }
10207   }
10208 }
10209 
10210 /// parseDirectiveArch
10211 ///  ::= .arch token
10212 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
10213   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
10214   ARM::ArchKind ID = ARM::parseArch(Arch);
10215 
10216   if (ID == ARM::ArchKind::INVALID)
10217     return Error(L, "Unknown arch name");
10218 
10219   bool WasThumb = isThumb();
10220   Triple T;
10221   MCSubtargetInfo &STI = copySTI();
10222   STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
10223   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
10224   FixModeAfterArchChange(WasThumb, L);
10225 
10226   getTargetStreamer().emitArch(ID);
10227   return false;
10228 }
10229 
10230 /// parseDirectiveEabiAttr
10231 ///  ::= .eabi_attribute int, int [, "str"]
10232 ///  ::= .eabi_attribute Tag_name, int [, "str"]
10233 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
10234   MCAsmParser &Parser = getParser();
10235   int64_t Tag;
10236   SMLoc TagLoc;
10237   TagLoc = Parser.getTok().getLoc();
10238   if (Parser.getTok().is(AsmToken::Identifier)) {
10239     StringRef Name = Parser.getTok().getIdentifier();
10240     Tag = ARMBuildAttrs::AttrTypeFromString(Name);
10241     if (Tag == -1) {
10242       Error(TagLoc, "attribute name not recognised: " + Name);
10243       return false;
10244     }
10245     Parser.Lex();
10246   } else {
10247     const MCExpr *AttrExpr;
10248 
10249     TagLoc = Parser.getTok().getLoc();
10250     if (Parser.parseExpression(AttrExpr))
10251       return true;
10252 
10253     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
10254     if (check(!CE, TagLoc, "expected numeric constant"))
10255       return true;
10256 
10257     Tag = CE->getValue();
10258   }
10259 
10260   if (Parser.parseToken(AsmToken::Comma, "comma expected"))
10261     return true;
10262 
10263   StringRef StringValue = "";
10264   bool IsStringValue = false;
10265 
10266   int64_t IntegerValue = 0;
10267   bool IsIntegerValue = false;
10268 
10269   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
10270     IsStringValue = true;
10271   else if (Tag == ARMBuildAttrs::compatibility) {
10272     IsStringValue = true;
10273     IsIntegerValue = true;
10274   } else if (Tag < 32 || Tag % 2 == 0)
10275     IsIntegerValue = true;
10276   else if (Tag % 2 == 1)
10277     IsStringValue = true;
10278   else
10279     llvm_unreachable("invalid tag type");
10280 
10281   if (IsIntegerValue) {
10282     const MCExpr *ValueExpr;
10283     SMLoc ValueExprLoc = Parser.getTok().getLoc();
10284     if (Parser.parseExpression(ValueExpr))
10285       return true;
10286 
10287     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
10288     if (!CE)
10289       return Error(ValueExprLoc, "expected numeric constant");
10290     IntegerValue = CE->getValue();
10291   }
10292 
10293   if (Tag == ARMBuildAttrs::compatibility) {
10294     if (Parser.parseToken(AsmToken::Comma, "comma expected"))
10295       return true;
10296   }
10297 
10298   if (IsStringValue) {
10299     if (Parser.getTok().isNot(AsmToken::String))
10300       return Error(Parser.getTok().getLoc(), "bad string constant");
10301 
10302     StringValue = Parser.getTok().getStringContents();
10303     Parser.Lex();
10304   }
10305 
10306   if (Parser.parseToken(AsmToken::EndOfStatement,
10307                         "unexpected token in '.eabi_attribute' directive"))
10308     return true;
10309 
10310   if (IsIntegerValue && IsStringValue) {
10311     assert(Tag == ARMBuildAttrs::compatibility);
10312     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
10313   } else if (IsIntegerValue)
10314     getTargetStreamer().emitAttribute(Tag, IntegerValue);
10315   else if (IsStringValue)
10316     getTargetStreamer().emitTextAttribute(Tag, StringValue);
10317   return false;
10318 }
10319 
10320 /// parseDirectiveCPU
10321 ///  ::= .cpu str
10322 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
10323   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
10324   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
10325 
10326   // FIXME: This is using table-gen data, but should be moved to
10327   // ARMTargetParser once that is table-gen'd.
10328   if (!getSTI().isCPUStringValid(CPU))
10329     return Error(L, "Unknown CPU name");
10330 
10331   bool WasThumb = isThumb();
10332   MCSubtargetInfo &STI = copySTI();
10333   STI.setDefaultFeatures(CPU, "");
10334   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
10335   FixModeAfterArchChange(WasThumb, L);
10336 
10337   return false;
10338 }
10339 
10340 /// parseDirectiveFPU
10341 ///  ::= .fpu str
10342 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
10343   SMLoc FPUNameLoc = getTok().getLoc();
10344   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
10345 
10346   unsigned ID = ARM::parseFPU(FPU);
10347   std::vector<StringRef> Features;
10348   if (!ARM::getFPUFeatures(ID, Features))
10349     return Error(FPUNameLoc, "Unknown FPU name");
10350 
10351   MCSubtargetInfo &STI = copySTI();
10352   for (auto Feature : Features)
10353     STI.ApplyFeatureFlag(Feature);
10354   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
10355 
10356   getTargetStreamer().emitFPU(ID);
10357   return false;
10358 }
10359 
10360 /// parseDirectiveFnStart
10361 ///  ::= .fnstart
10362 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
10363   if (parseToken(AsmToken::EndOfStatement,
10364                  "unexpected token in '.fnstart' directive"))
10365     return true;
10366 
10367   if (UC.hasFnStart()) {
10368     Error(L, ".fnstart starts before the end of previous one");
10369     UC.emitFnStartLocNotes();
10370     return true;
10371   }
10372 
10373   // Reset the unwind directives parser state
10374   UC.reset();
10375 
10376   getTargetStreamer().emitFnStart();
10377 
10378   UC.recordFnStart(L);
10379   return false;
10380 }
10381 
10382 /// parseDirectiveFnEnd
10383 ///  ::= .fnend
10384 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
10385   if (parseToken(AsmToken::EndOfStatement,
10386                  "unexpected token in '.fnend' directive"))
10387     return true;
10388   // Check the ordering of unwind directives
10389   if (!UC.hasFnStart())
10390     return Error(L, ".fnstart must precede .fnend directive");
10391 
10392   // Reset the unwind directives parser state
10393   getTargetStreamer().emitFnEnd();
10394 
10395   UC.reset();
10396   return false;
10397 }
10398 
10399 /// parseDirectiveCantUnwind
10400 ///  ::= .cantunwind
10401 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
10402   if (parseToken(AsmToken::EndOfStatement,
10403                  "unexpected token in '.cantunwind' directive"))
10404     return true;
10405 
10406   UC.recordCantUnwind(L);
10407   // Check the ordering of unwind directives
10408   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
10409     return true;
10410 
10411   if (UC.hasHandlerData()) {
10412     Error(L, ".cantunwind can't be used with .handlerdata directive");
10413     UC.emitHandlerDataLocNotes();
10414     return true;
10415   }
10416   if (UC.hasPersonality()) {
10417     Error(L, ".cantunwind can't be used with .personality directive");
10418     UC.emitPersonalityLocNotes();
10419     return true;
10420   }
10421 
10422   getTargetStreamer().emitCantUnwind();
10423   return false;
10424 }
10425 
10426 /// parseDirectivePersonality
10427 ///  ::= .personality name
10428 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
10429   MCAsmParser &Parser = getParser();
10430   bool HasExistingPersonality = UC.hasPersonality();
10431 
10432   // Parse the name of the personality routine
10433   if (Parser.getTok().isNot(AsmToken::Identifier))
10434     return Error(L, "unexpected input in .personality directive.");
10435   StringRef Name(Parser.getTok().getIdentifier());
10436   Parser.Lex();
10437 
10438   if (parseToken(AsmToken::EndOfStatement,
10439                  "unexpected token in '.personality' directive"))
10440     return true;
10441 
10442   UC.recordPersonality(L);
10443 
10444   // Check the ordering of unwind directives
10445   if (!UC.hasFnStart())
10446     return Error(L, ".fnstart must precede .personality directive");
10447   if (UC.cantUnwind()) {
10448     Error(L, ".personality can't be used with .cantunwind directive");
10449     UC.emitCantUnwindLocNotes();
10450     return true;
10451   }
10452   if (UC.hasHandlerData()) {
10453     Error(L, ".personality must precede .handlerdata directive");
10454     UC.emitHandlerDataLocNotes();
10455     return true;
10456   }
10457   if (HasExistingPersonality) {
10458     Error(L, "multiple personality directives");
10459     UC.emitPersonalityLocNotes();
10460     return true;
10461   }
10462 
10463   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
10464   getTargetStreamer().emitPersonality(PR);
10465   return false;
10466 }
10467 
10468 /// parseDirectiveHandlerData
10469 ///  ::= .handlerdata
10470 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
10471   if (parseToken(AsmToken::EndOfStatement,
10472                  "unexpected token in '.handlerdata' directive"))
10473     return true;
10474 
10475   UC.recordHandlerData(L);
10476   // Check the ordering of unwind directives
10477   if (!UC.hasFnStart())
10478     return Error(L, ".fnstart must precede .personality directive");
10479   if (UC.cantUnwind()) {
10480     Error(L, ".handlerdata can't be used with .cantunwind directive");
10481     UC.emitCantUnwindLocNotes();
10482     return true;
10483   }
10484 
10485   getTargetStreamer().emitHandlerData();
10486   return false;
10487 }
10488 
10489 /// parseDirectiveSetFP
10490 ///  ::= .setfp fpreg, spreg [, offset]
10491 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
10492   MCAsmParser &Parser = getParser();
10493   // Check the ordering of unwind directives
10494   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
10495       check(UC.hasHandlerData(), L,
10496             ".setfp must precede .handlerdata directive"))
10497     return true;
10498 
10499   // Parse fpreg
10500   SMLoc FPRegLoc = Parser.getTok().getLoc();
10501   int FPReg = tryParseRegister();
10502 
10503   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
10504       Parser.parseToken(AsmToken::Comma, "comma expected"))
10505     return true;
10506 
10507   // Parse spreg
10508   SMLoc SPRegLoc = Parser.getTok().getLoc();
10509   int SPReg = tryParseRegister();
10510   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
10511       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
10512             "register should be either $sp or the latest fp register"))
10513     return true;
10514 
10515   // Update the frame pointer register
10516   UC.saveFPReg(FPReg);
10517 
10518   // Parse offset
10519   int64_t Offset = 0;
10520   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10521     if (Parser.getTok().isNot(AsmToken::Hash) &&
10522         Parser.getTok().isNot(AsmToken::Dollar))
10523       return Error(Parser.getTok().getLoc(), "'#' expected");
10524     Parser.Lex(); // skip hash token.
10525 
10526     const MCExpr *OffsetExpr;
10527     SMLoc ExLoc = Parser.getTok().getLoc();
10528     SMLoc EndLoc;
10529     if (getParser().parseExpression(OffsetExpr, EndLoc))
10530       return Error(ExLoc, "malformed setfp offset");
10531     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10532     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
10533       return true;
10534     Offset = CE->getValue();
10535   }
10536 
10537   if (Parser.parseToken(AsmToken::EndOfStatement))
10538     return true;
10539 
10540   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
10541                                 static_cast<unsigned>(SPReg), Offset);
10542   return false;
10543 }
10544 
/// parseDirectivePad
10546 ///  ::= .pad offset
10547 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
10548   MCAsmParser &Parser = getParser();
10549   // Check the ordering of unwind directives
10550   if (!UC.hasFnStart())
10551     return Error(L, ".fnstart must precede .pad directive");
10552   if (UC.hasHandlerData())
10553     return Error(L, ".pad must precede .handlerdata directive");
10554 
10555   // Parse the offset
10556   if (Parser.getTok().isNot(AsmToken::Hash) &&
10557       Parser.getTok().isNot(AsmToken::Dollar))
10558     return Error(Parser.getTok().getLoc(), "'#' expected");
10559   Parser.Lex(); // skip hash token.
10560 
10561   const MCExpr *OffsetExpr;
10562   SMLoc ExLoc = Parser.getTok().getLoc();
10563   SMLoc EndLoc;
10564   if (getParser().parseExpression(OffsetExpr, EndLoc))
10565     return Error(ExLoc, "malformed pad offset");
10566   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10567   if (!CE)
10568     return Error(ExLoc, "pad offset must be an immediate");
10569 
10570   if (parseToken(AsmToken::EndOfStatement,
10571                  "unexpected token in '.pad' directive"))
10572     return true;
10573 
10574   getTargetStreamer().emitPad(CE->getValue());
10575   return false;
10576 }
10577 
10578 /// parseDirectiveRegSave
10579 ///  ::= .save  { registers }
10580 ///  ::= .vsave { registers }
10581 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
10582   // Check the ordering of unwind directives
10583   if (!UC.hasFnStart())
10584     return Error(L, ".fnstart must precede .save or .vsave directives");
10585   if (UC.hasHandlerData())
10586     return Error(L, ".save or .vsave must precede .handlerdata directive");
10587 
10588   // RAII object to make sure parsed operands are deleted.
10589   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
10590 
10591   // Parse the register list
10592   if (parseRegisterList(Operands) ||
10593       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10594     return true;
10595   ARMOperand &Op = (ARMOperand &)*Operands[0];
10596   if (!IsVector && !Op.isRegList())
10597     return Error(L, ".save expects GPR registers");
10598   if (IsVector && !Op.isDPRRegList())
10599     return Error(L, ".vsave expects DPR registers");
10600 
10601   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
10602   return false;
10603 }
10604 
10605 /// parseDirectiveInst
10606 ///  ::= .inst opcode [, ...]
10607 ///  ::= .inst.n opcode [, ...]
10608 ///  ::= .inst.w opcode [, ...]
10609 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
10610   int Width = 4;
10611 
10612   if (isThumb()) {
10613     switch (Suffix) {
10614     case 'n':
10615       Width = 2;
10616       break;
10617     case 'w':
10618       break;
10619     default:
10620       Width = 0;
10621       break;
10622     }
10623   } else {
10624     if (Suffix)
10625       return Error(Loc, "width suffixes are invalid in ARM mode");
10626   }
10627 
10628   auto parseOne = [&]() -> bool {
10629     const MCExpr *Expr;
10630     if (getParser().parseExpression(Expr))
10631       return true;
10632     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
10633     if (!Value) {
10634       return Error(Loc, "expected constant expression");
10635     }
10636 
10637     char CurSuffix = Suffix;
10638     switch (Width) {
10639     case 2:
10640       if (Value->getValue() > 0xffff)
10641         return Error(Loc, "inst.n operand is too big, use inst.w instead");
10642       break;
10643     case 4:
10644       if (Value->getValue() > 0xffffffff)
10645         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
10646                               " operand is too big");
10647       break;
10648     case 0:
10649       // Thumb mode, no width indicated. Guess from the opcode, if possible.
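      // E.g. 0x4770 (bx lr) is taken as 16-bit, 0xf3af8000 (nop.w) as
      // 32-bit.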
10650       if (Value->getValue() < 0xe800)
10651         CurSuffix = 'n';
10652       else if (Value->getValue() >= 0xe8000000)
10653         CurSuffix = 'w';
10654       else
10655         return Error(Loc, "cannot determine Thumb instruction size, "
10656                           "use inst.n/inst.w instead");
10657       break;
10658     default:
10659       llvm_unreachable("only supported widths are 2 and 4");
10660     }
10661 
10662     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
10663     return false;
10664   };
10665 
10666   if (parseOptionalToken(AsmToken::EndOfStatement))
10667     return Error(Loc, "expected expression following directive");
10668   if (parseMany(parseOne))
10669     return true;
10670   return false;
10671 }
10672 
10673 /// parseDirectiveLtorg
10674 ///  ::= .ltorg | .pool
10675 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
10676   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10677     return true;
10678   getTargetStreamer().emitCurrentConstantPool();
10679   return false;
10680 }
10681 
10682 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
10683   const MCSection *Section = getStreamer().getCurrentSectionOnly();
10684 
10685   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10686     return true;
10687 
10688   if (!Section) {
10689     getStreamer().InitSections(false);
10690     Section = getStreamer().getCurrentSectionOnly();
10691   }
10692 
10693   assert(Section && "must have section to emit alignment");
10694   if (Section->UseCodeAlign())
10695     getStreamer().EmitCodeAlignment(2);
10696   else
10697     getStreamer().EmitValueToAlignment(2);
10698 
10699   return false;
10700 }
10701 
10702 /// parseDirectivePersonalityIndex
10703 ///   ::= .personalityindex index
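/// For example (illustrative), ".personalityindex 0" selects the EHABI
/// personality routine __aeabi_unwind_cpp_pr0.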
10704 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
10705   MCAsmParser &Parser = getParser();
10706   bool HasExistingPersonality = UC.hasPersonality();
10707 
10708   const MCExpr *IndexExpression;
10709   SMLoc IndexLoc = Parser.getTok().getLoc();
10710   if (Parser.parseExpression(IndexExpression) ||
10711       parseToken(AsmToken::EndOfStatement,
10712                  "unexpected token in '.personalityindex' directive")) {
10713     return true;
10714   }
10715 
10716   UC.recordPersonalityIndex(L);
10717 
10718   if (!UC.hasFnStart()) {
10719     return Error(L, ".fnstart must precede .personalityindex directive");
10720   }
10721   if (UC.cantUnwind()) {
10722     Error(L, ".personalityindex cannot be used with .cantunwind");
10723     UC.emitCantUnwindLocNotes();
10724     return true;
10725   }
10726   if (UC.hasHandlerData()) {
10727     Error(L, ".personalityindex must precede .handlerdata directive");
10728     UC.emitHandlerDataLocNotes();
10729     return true;
10730   }
10731   if (HasExistingPersonality) {
10732     Error(L, "multiple personality directives");
10733     UC.emitPersonalityLocNotes();
10734     return true;
10735   }
10736 
10737   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
10738   if (!CE)
10739     return Error(IndexLoc, "index must be a constant number");
10740   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
10741     return Error(IndexLoc,
10742                  "personality routine index should be in range [0-3]");
10743 
10744   getTargetStreamer().emitPersonalityIndex(CE->getValue());
10745   return false;
10746 }
10747 
10748 /// parseDirectiveUnwindRaw
10749 ///   ::= .unwind_raw offset, opcode [, opcode...]
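/// Illustrative usage (assumed): ".unwind_raw 4, 0xb0" records a stack
/// offset of 4 together with the raw EHABI unwind opcode 0xb0 ("finish").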
10750 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
10751   MCAsmParser &Parser = getParser();
10752   int64_t StackOffset;
10753   const MCExpr *OffsetExpr;
10754   SMLoc OffsetLoc = getLexer().getLoc();
10755 
10756   if (!UC.hasFnStart())
10757     return Error(L, ".fnstart must precede .unwind_raw directives");
10758   if (getParser().parseExpression(OffsetExpr))
10759     return Error(OffsetLoc, "expected expression");
10760 
10761   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10762   if (!CE)
10763     return Error(OffsetLoc, "offset must be a constant");
10764 
10765   StackOffset = CE->getValue();
10766 
10767   if (Parser.parseToken(AsmToken::Comma, "expected comma"))
10768     return true;
10769 
10770   SmallVector<uint8_t, 16> Opcodes;
10771 
10772   auto parseOne = [&]() -> bool {
10773     const MCExpr *OE;
10774     SMLoc OpcodeLoc = getLexer().getLoc();
10775     if (check(getLexer().is(AsmToken::EndOfStatement) ||
10776                   Parser.parseExpression(OE),
10777               OpcodeLoc, "expected opcode expression"))
10778       return true;
10779     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
10780     if (!OC)
10781       return Error(OpcodeLoc, "opcode value must be a constant");
10782     const int64_t Opcode = OC->getValue();
10783     if (Opcode & ~0xff)
10784       return Error(OpcodeLoc, "invalid opcode");
10785     Opcodes.push_back(uint8_t(Opcode));
10786     return false;
10787   };
10788 
10789   // Must have at least one opcode expression.
10790   SMLoc OpcodeLoc = getLexer().getLoc();
10791   if (parseOptionalToken(AsmToken::EndOfStatement))
10792     return Error(OpcodeLoc, "expected opcode expression");
10793   if (parseMany(parseOne))
10794     return true;
10795 
10796   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
10797   return false;
10798 }
10799 
10800 /// parseDirectiveTLSDescSeq
10801 ///   ::= .tlsdescseq tls-variable
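/// Illustrative usage (assumed): ".tlsdescseq tls_obj" annotates the TLS
/// descriptor sequence for the hypothetical TLS symbol "tls_obj".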
10802 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
10803   MCAsmParser &Parser = getParser();
10804 
10805   if (getLexer().isNot(AsmToken::Identifier))
10806     return TokError("expected variable after '.tlsdescseq' directive");
10807 
10808   const MCSymbolRefExpr *SRE =
10809     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
10810                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10811   Lex();
10812 
10813   if (parseToken(AsmToken::EndOfStatement,
10814                  "unexpected token in '.tlsdescseq' directive"))
10815     return true;
10816 
10817   getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10818   return false;
10819 }
10820 
10821 /// parseDirectiveMovSP
10822 ///  ::= .movsp reg [, #offset]
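/// Illustrative usage (assumed): ".movsp r7" or ".movsp r7, #8" tells the
/// unwinder that r7 (an example register) now tracks the stack pointer,
/// optionally with a byte offset.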
10823 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10824   MCAsmParser &Parser = getParser();
10825   if (!UC.hasFnStart())
10826     return Error(L, ".fnstart must precede .movsp directives");
10827   if (UC.getFPReg() != ARM::SP)
10828     return Error(L, "unexpected .movsp directive");
10829 
10830   SMLoc SPRegLoc = Parser.getTok().getLoc();
10831   int SPReg = tryParseRegister();
10832   if (SPReg == -1)
10833     return Error(SPRegLoc, "register expected");
10834   if (SPReg == ARM::SP || SPReg == ARM::PC)
10835     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10836 
10837   int64_t Offset = 0;
10838   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10839     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
10840       return true;
10841 
10842     const MCExpr *OffsetExpr;
10843     SMLoc OffsetLoc = Parser.getTok().getLoc();
10844 
10845     if (Parser.parseExpression(OffsetExpr))
10846       return Error(OffsetLoc, "malformed offset expression");
10847 
10848     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10849     if (!CE)
10850       return Error(OffsetLoc, "offset must be an immediate constant");
10851 
10852     Offset = CE->getValue();
10853   }
10854 
10855   if (parseToken(AsmToken::EndOfStatement,
10856                  "unexpected token in '.movsp' directive"))
10857     return true;
10858 
10859   getTargetStreamer().emitMovSP(SPReg, Offset);
10860   UC.saveFPReg(SPReg);
10861 
10862   return false;
10863 }
10864 
10865 /// parseDirectiveObjectArch
10866 ///   ::= .object_arch name
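/// Illustrative usage (assumed): ".object_arch armv4" overrides the
/// architecture recorded in the object's build attributes without changing
/// which instructions the assembler accepts.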
10867 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10868   MCAsmParser &Parser = getParser();
10869   if (getLexer().isNot(AsmToken::Identifier))
10870     return Error(getLexer().getLoc(), "unexpected token");
10871 
10872   StringRef Arch = Parser.getTok().getString();
10873   SMLoc ArchLoc = Parser.getTok().getLoc();
10874   Lex();
10875 
10876   ARM::ArchKind ID = ARM::parseArch(Arch);
10877 
10878   if (ID == ARM::ArchKind::INVALID)
10879     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
10880   if (parseToken(AsmToken::EndOfStatement))
10881     return true;
10882 
10883   getTargetStreamer().emitObjectArch(ID);
10884   return false;
10885 }
10886 
10887 /// parseDirectiveAlign
10888 ///   ::= .align
10889 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
10890   // NOTE: if this is not the end of the statement, fall back to the
10891   // target-agnostic handling for this directive, which handles it correctly.
10892   if (parseOptionalToken(AsmToken::EndOfStatement)) {
10893     // A bare '.align' is handled target-specifically to mean 2**2 byte alignment.
10894     const MCSection *Section = getStreamer().getCurrentSectionOnly();
10895     assert(Section && "must have section to emit alignment");
10896     if (Section->UseCodeAlign())
10897       getStreamer().EmitCodeAlignment(4, 0);
10898     else
10899       getStreamer().EmitValueToAlignment(4, 0, 1, 0);
10900     return false;
10901   }
10902   return true;
10903 }
10904 
10905 /// parseDirectiveThumbSet
10906 ///  ::= .thumb_set name, value
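/// Illustrative usage (assumed): ".thumb_set alias_fn, impl_fn" binds the
/// hypothetical symbol "alias_fn" to "impl_fn" and additionally marks it as
/// a Thumb function entry point, much like ".set" combined with ".thumb_func".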
10907 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
10908   MCAsmParser &Parser = getParser();
10909 
10910   StringRef Name;
10911   if (check(Parser.parseIdentifier(Name),
10912             "expected identifier after '.thumb_set'") ||
10913       parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
10914     return true;
10915 
10916   MCSymbol *Sym;
10917   const MCExpr *Value;
10918   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
10919                                                Parser, Sym, Value))
10920     return true;
10921 
10922   getTargetStreamer().emitThumbSet(Sym, Value);
10923   return false;
10924 }
10925 
10926 /// Force static initialization.
10927 extern "C" void LLVMInitializeARMAsmParser() {
10928   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
10929   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
10930   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
10931   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
10932 }
10933 
10934 #define GET_REGISTER_MATCHER
10935 #define GET_SUBTARGET_FEATURE_NAME
10936 #define GET_MATCHER_IMPLEMENTATION
10937 #define GET_MNEMONIC_SPELL_CHECKER
10938 #include "ARMGenAsmMatcher.inc"
10939 
10940 // Some diagnostics need to vary with subtarget features, so they are handled
10941 // here. For example, the DPR class has either 16 or 32 registers, depending
10942 // on the FPU available.
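// For example (illustrative), on a D16-only FPU an out-of-range operand such
// as "d20" is reported with the "[d0, d15]" form of the Match_DPR message.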
10943 const char *
10944 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
10945   switch (MatchError) {
10946   // rGPR contains sp starting with ARMv8.
10947   case Match_rGPR:
10948     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
10949                       : "operand must be a register in range [r0, r12] or r14";
10950   // DPR contains 16 registers for some FPUs, and 32 for others.
10951   case Match_DPR:
10952     return hasD32() ? "operand must be a register in range [d0, d31]"
10953                     : "operand must be a register in range [d0, d15]";
10954   case Match_DPR_RegList:
10955     return hasD32() ? "operand must be a list of registers in range [d0, d31]"
10956                     : "operand must be a list of registers in range [d0, d15]";
10957 
10958   // For all other diags, use the static string from tablegen.
10959   default:
10960     return getMatchKindDiag(MatchError);
10961   }
10962 }
10963 
10964 // Process the list of near-misses, throwing away ones we don't want to report
10965 // to the user, and converting the rest to a source location and string that
10966 // should be reported.
10967 void
10968 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
10969                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
10970                                SMLoc IDLoc, OperandVector &Operands) {
10971   // TODO: If operand didn't match, sub in a dummy one and run target
10972   // predicate, so that we can avoid reporting near-misses that are invalid?
10973   // TODO: Many operand types don't have SuperClasses set, so we report
10974   // redundant ones.
10975   // TODO: Some operands are superclasses of registers (e.g.
10976   // MCK_RegShiftedImm), we don't have any way to represent that currently.
10977   // TODO: This is not all ARM-specific, can some of it be factored out?
10978 
10979   // Record some information about near-misses that we have already seen, so
10980   // that we can avoid reporting redundant ones. For example, if there are
10981   // variants of an instruction that take 8- and 16-bit immediates, we want
10982   // to only report the widest one.
10983   std::multimap<unsigned, unsigned> OperandMissesSeen;
10984   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
10985   bool ReportedTooFewOperands = false;
10986 
10987   // Process the near-misses in reverse order, so that we see the more general
10988   // ones first and can avoid emitting the more specific ones.
10989   for (NearMissInfo &I : reverse(NearMissesIn)) {
10990     switch (I.getKind()) {
10991     case NearMissInfo::NearMissOperand: {
10992       SMLoc OperandLoc =
10993           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
10994       const char *OperandDiag =
10995           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
10996 
10997       // If we have already emitted a message for a superclass, don't also report
10998       // the sub-class. We consider all operand classes that we don't have a
10999       // specialised diagnostic for to be equal for the purpose of this check,
11000       // so that we don't report the generic error multiple times on the same
11001       // operand.
11002       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
11003       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
11004       if (std::any_of(PrevReports.first, PrevReports.second,
11005                       [DupCheckMatchClass](
11006                           const std::pair<unsigned, unsigned> Pair) {
11007             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
11008               return Pair.second == DupCheckMatchClass;
11009             else
11010               return isSubclass((MatchClassKind)DupCheckMatchClass,
11011                                 (MatchClassKind)Pair.second);
11012           }))
11013         break;
11014       OperandMissesSeen.insert(
11015           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
11016 
11017       NearMissMessage Message;
11018       Message.Loc = OperandLoc;
11019       if (OperandDiag) {
11020         Message.Message = OperandDiag;
11021       } else if (I.getOperandClass() == InvalidMatchClass) {
11022         Message.Message = "too many operands for instruction";
11023       } else {
11024         Message.Message = "invalid operand for instruction";
11025         LLVM_DEBUG(
11026             dbgs() << "Missing diagnostic string for operand class "
11027                    << getMatchClassName((MatchClassKind)I.getOperandClass())
11028                    << I.getOperandClass() << ", error " << I.getOperandError()
11029                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
11030       }
11031       NearMissesOut.emplace_back(Message);
11032       break;
11033     }
11034     case NearMissInfo::NearMissFeature: {
11035       const FeatureBitset &MissingFeatures = I.getFeatures();
11036       // Don't report the same set of features twice.
11037       if (FeatureMissesSeen.count(MissingFeatures))
11038         break;
11039       FeatureMissesSeen.insert(MissingFeatures);
11040 
11041       // Special case: don't report a feature set which includes arm-mode for
11042       // targets that don't have ARM mode.
11043       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
11044         break;
11045       // Don't report any near-misses that both require switching instruction
11046       // set, and adding other subtarget features.
11047       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
11048           MissingFeatures.count() > 1)
11049         break;
11050       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
11051           MissingFeatures.count() > 1)
11052         break;
11053       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
11054           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
11055                                              Feature_IsThumbBit})).any())
11056         break;
11057       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
11058         break;
11059 
11060       NearMissMessage Message;
11061       Message.Loc = IDLoc;
11062       raw_svector_ostream OS(Message.Message);
11063 
11064       OS << "instruction requires:";
11065       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
11066         if (MissingFeatures.test(i))
11067           OS << ' ' << getSubtargetFeatureName(i);
11068 
11069       NearMissesOut.emplace_back(Message);
11070 
11071       break;
11072     }
11073     case NearMissInfo::NearMissPredicate: {
11074       NearMissMessage Message;
11075       Message.Loc = IDLoc;
11076       switch (I.getPredicateError()) {
11077       case Match_RequiresNotITBlock:
11078         Message.Message = "flag setting instruction only valid outside IT block";
11079         break;
11080       case Match_RequiresITBlock:
11081         Message.Message = "instruction only valid inside IT block";
11082         break;
11083       case Match_RequiresV6:
11084         Message.Message = "instruction variant requires ARMv6 or later";
11085         break;
11086       case Match_RequiresThumb2:
11087         Message.Message = "instruction variant requires Thumb2";
11088         break;
11089       case Match_RequiresV8:
11090         Message.Message = "instruction variant requires ARMv8 or later";
11091         break;
11092       case Match_RequiresFlagSetting:
11093         Message.Message = "no flag-preserving variant of this instruction available";
11094         break;
11095       case Match_InvalidOperand:
11096         Message.Message = "invalid operand for instruction";
11097         break;
11098       default:
11099         llvm_unreachable("Unhandled target predicate error");
11100         break;
11101       }
11102       NearMissesOut.emplace_back(Message);
11103       break;
11104     }
11105     case NearMissInfo::NearMissTooFewOperands: {
11106       if (!ReportedTooFewOperands) {
11107         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
11108         NearMissesOut.emplace_back(NearMissMessage{
11109             EndLoc, StringRef("too few operands for instruction")});
11110         ReportedTooFewOperands = true;
11111       }
11112       break;
11113     }
11114     case NearMissInfo::NoNearMiss:
11115       // This should never leave the matcher.
11116       llvm_unreachable("not a near-miss");
11117       break;
11118     }
11119   }
11120 }
11121 
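// Report the filtered near-misses: a single message becomes the error itself;
// several messages produce a generic "invalid instruction" error followed by
// one note per candidate fix.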
11122 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
11123                                     SMLoc IDLoc, OperandVector &Operands) {
11124   SmallVector<NearMissMessage, 4> Messages;
11125   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
11126 
11127   if (Messages.size() == 0) {
11128     // No near-misses were found, so the best we can do is "invalid
11129     // instruction".
11130     Error(IDLoc, "invalid instruction");
11131   } else if (Messages.size() == 1) {
11132     // One near miss was found, report it as the sole error.
11133     Error(Messages[0].Loc, Messages[0].Message);
11134   } else {
11135     // More than one near miss, so report a generic "invalid instruction"
11136     // error, followed by notes for each of the near-misses.
11137     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
11138     for (auto &M : Messages) {
11139       Note(M.Loc, M.Message);
11140     }
11141   }
11142 }
11143 
11144 /// parseDirectiveArchExtension
11145 ///   ::= .arch_extension [no]feature
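/// Illustrative usage: ".arch_extension crc" enables the CRC extension (when
/// the base architecture permits it) and ".arch_extension nocrc" disables it
/// again; extension names are matched against the table below.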
11146 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
11147   // FIXME: This structure should be moved inside ARMTargetParser
11148   // when we start to table-generate them, so that we can use the ARM
11149   // feature flags below that were generated by table-gen.
11150   static const struct {
11151     const unsigned Kind;
11152     const FeatureBitset ArchCheck;
11153     const FeatureBitset Features;
11154   } Extensions[] = {
11155     { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
11156     { ARM::AEK_CRYPTO,  {Feature_HasV8Bit},
11157       {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
11158     { ARM::AEK_FP, {Feature_HasV8Bit},
11159       {ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
11160     { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
11161       {Feature_HasV7Bit, Feature_IsNotMClassBit},
11162       {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
11163     { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
11164       {ARM::FeatureMP} },
11165     { ARM::AEK_SIMD, {Feature_HasV8Bit},
11166       {ARM::FeatureNEON, ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
11167     { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
11168     // FIXME: Only available in A-class, isel not predicated
11169     { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
11170     { ARM::AEK_FP16, {Feature_HasV8_2aBit},
11171       {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
11172     { ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
11173     { ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB} },
11174     // FIXME: Unsupported extensions.
11175     { ARM::AEK_OS, {}, {} },
11176     { ARM::AEK_IWMMXT, {}, {} },
11177     { ARM::AEK_IWMMXT2, {}, {} },
11178     { ARM::AEK_MAVERICK, {}, {} },
11179     { ARM::AEK_XSCALE, {}, {} },
11180   };
11181 
11182   MCAsmParser &Parser = getParser();
11183 
11184   if (getLexer().isNot(AsmToken::Identifier))
11185     return Error(getLexer().getLoc(), "expected architecture extension name");
11186 
11187   StringRef Name = Parser.getTok().getString();
11188   SMLoc ExtLoc = Parser.getTok().getLoc();
11189   Lex();
11190 
11191   if (parseToken(AsmToken::EndOfStatement,
11192                  "unexpected token in '.arch_extension' directive"))
11193     return true;
11194 
11195   bool EnableFeature = true;
11196   if (Name.startswith_lower("no")) {
11197     EnableFeature = false;
11198     Name = Name.substr(2);
11199   }
11200   unsigned FeatureKind = ARM::parseArchExt(Name);
11201   if (FeatureKind == ARM::AEK_INVALID)
11202     return Error(ExtLoc, "unknown architectural extension: " + Name);
11203 
11204   for (const auto &Extension : Extensions) {
11205     if (Extension.Kind != FeatureKind)
11206       continue;
11207 
11208     if (Extension.Features.none())
11209       return Error(ExtLoc, "unsupported architectural extension: " + Name);
11210 
11211     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
11212       return Error(ExtLoc, "architectural extension '" + Name +
11213                                "' is not "
11214                                "allowed for the current base architecture");
11215 
11216     MCSubtargetInfo &STI = copySTI();
11217     if (EnableFeature) {
11218       STI.SetFeatureBitsTransitively(Extension.Features);
11219     } else {
11220       STI.ClearFeatureBitsTransitively(Extension.Features);
11221     }
11222     FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
11223     setAvailableFeatures(Features);
11224     return false;
11225   }
11226 
11227   return Error(ExtLoc, "unknown architectural extension: " + Name);
11228 }
11229 
11230 // Define this matcher function after the auto-generated include so we
11231 // have the match class enum definitions.
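// For example (illustrative), an InstAlias whose assembly syntax contains a
// literal "#0" token is matched through the MCK__35_0 case below.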
11232 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
11233                                                   unsigned Kind) {
11234   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
11235   // If the kind is a token for a literal immediate, check if our asm
11236   // operand matches. This is for InstAliases which have a fixed-value
11237   // immediate in the syntax.
11238   switch (Kind) {
11239   default: break;
11240   case MCK__35_0:
11241     if (Op.isImm())
11242       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
11243         if (CE->getValue() == 0)
11244           return Match_Success;
11245     break;
11246   case MCK_ModImm:
11247     if (Op.isImm()) {
11248       const MCExpr *SOExpr = Op.getImm();
11249       int64_t Value;
11250       if (!SOExpr->evaluateAsAbsolute(Value))
11251         return Match_Success;
11252       assert((Value >= std::numeric_limits<int32_t>::min() &&
11253               Value <= std::numeric_limits<uint32_t>::max()) &&
11254              "expression value must be representable in 32 bits");
11255     }
11256     break;
11257   case MCK_rGPR:
11258     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
11259       return Match_Success;
11260     return Match_rGPR;
11261   case MCK_GPRPair:
11262     if (Op.isReg() &&
11263         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
11264       return Match_Success;
11265     break;
11266   }
11267   return Match_InvalidOperand;
11268 }
11269 
11270 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic,
11271                                            StringRef ExtraToken) {
11272   if (!hasMVE())
11273     return false;
11274 
11275   return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") ||
11276          Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") ||
11277          Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") ||
11278          Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") ||
11279          Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") ||
11280          Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") ||
11281          Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") ||
11282          Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") ||
11283          Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") ||
11284          Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") ||
11285          Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") ||
11286          Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") ||
11287          Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") ||
11288          Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") ||
11289          Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") ||
11290          Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") ||
11291          Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") ||
11292          Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") ||
11293          Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") ||
11294          Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") ||
11295          Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") ||
11296          Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") ||
11297          Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") ||
11298          Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") ||
11299          Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") ||
11300          Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") ||
11301          Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") ||
11302          Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") ||
11303          Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") ||
11304          Mnemonic.startswith("vqabs") ||
11305          (Mnemonic.startswith("vrint") && Mnemonic != "vrintr") ||
11306          Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") ||
11307          Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") ||
11308          Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") ||
11309          Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") ||
11310          Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") ||
11311          Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") ||
11312          Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") ||
11313          Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") ||
11314          Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") ||
11315          Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") ||
11316          Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") ||
11317          Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") ||
11318          Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") ||
11319          Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") ||
11320          Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") ||
11321          Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") ||
11322          Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") ||
11323          Mnemonic.startswith("vldrb") ||
11324          (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") ||
11325          (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") ||
11326          Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") ||
11327          Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") ||
11328          Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") ||
11329          Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") ||
11330          Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") ||
11331          Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") ||
11332          Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") ||
11333          Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") ||
11334          Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") ||
11335          Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") ||
11336          Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") ||
11337          Mnemonic.startswith("vcvt") ||
11338          (Mnemonic.startswith("vmov") &&
11339           !(ExtraToken == ".f16" || ExtraToken == ".32" ||
11340             ExtraToken == ".16" || ExtraToken == ".8"));
11341 }
11342