1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "ARMFeatures.h"
#include "InstPrinter/ARMInstPrinter.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "MCTargetDesc/ARMMCTargetDesc.h"
#include "Utils/ARMBaseInfo.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/MC/MCContext.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/MC/MCInst.h"
30 #include "llvm/MC/MCInstrDesc.h"
31 #include "llvm/MC/MCInstrInfo.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
37 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
39 #include "llvm/MC/MCRegisterInfo.h"
40 #include "llvm/MC/MCSection.h"
41 #include "llvm/MC/MCStreamer.h"
42 #include "llvm/MC/MCSubtargetInfo.h"
43 #include "llvm/MC/MCSymbol.h"
44 #include "llvm/MC/SubtargetFeature.h"
45 #include "llvm/Support/ARMBuildAttributes.h"
46 #include "llvm/Support/ARMEHABI.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MathExtras.h"
52 #include "llvm/Support/SMLoc.h"
53 #include "llvm/Support/TargetParser.h"
54 #include "llvm/Support/TargetRegistry.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <algorithm>
57 #include <cassert>
58 #include <cstddef>
59 #include <cstdint>
60 #include <iterator>
61 #include <limits>
62 #include <memory>
63 #include <string>
64 #include <utility>
65 #include <vector>
66 
67 #define DEBUG_TYPE "asm-parser"
68 
69 using namespace llvm;
70 
71 namespace {
72 
73 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
74 
75 static cl::opt<ImplicitItModeTy> ImplicitItMode(
76     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
    cl::desc("Allow conditional instructions outside of an IT block"),
78     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
79                           "Accept in both ISAs, emit implicit ITs in Thumb"),
80                clEnumValN(ImplicitItModeTy::Never, "never",
81                           "Warn in ARM, reject in Thumb"),
82                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
83                           "Accept in ARM, reject in Thumb"),
84                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
85                           "Warn in ARM, emit implicit ITs in Thumb")));
86 
87 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
88                                         cl::init(false));
89 
90 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
91 
92 class UnwindContext {
93   using Locs = SmallVector<SMLoc, 4>;
94 
95   MCAsmParser &Parser;
96   Locs FnStartLocs;
97   Locs CantUnwindLocs;
98   Locs PersonalityLocs;
99   Locs PersonalityIndexLocs;
100   Locs HandlerDataLocs;
101   int FPReg;
102 
103 public:
104   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
105 
106   bool hasFnStart() const { return !FnStartLocs.empty(); }
107   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
108   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
109 
110   bool hasPersonality() const {
111     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
112   }
113 
114   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
115   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
116   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
117   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
118   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
119 
120   void saveFPReg(int Reg) { FPReg = Reg; }
121   int getFPReg() const { return FPReg; }
122 
123   void emitFnStartLocNotes() const {
124     for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
125          FI != FE; ++FI)
126       Parser.Note(*FI, ".fnstart was specified here");
127   }
128 
129   void emitCantUnwindLocNotes() const {
130     for (Locs::const_iterator UI = CantUnwindLocs.begin(),
131                               UE = CantUnwindLocs.end(); UI != UE; ++UI)
132       Parser.Note(*UI, ".cantunwind was specified here");
133   }
134 
135   void emitHandlerDataLocNotes() const {
136     for (Locs::const_iterator HI = HandlerDataLocs.begin(),
137                               HE = HandlerDataLocs.end(); HI != HE; ++HI)
138       Parser.Note(*HI, ".handlerdata was specified here");
139   }
140 
141   void emitPersonalityLocNotes() const {
142     for (Locs::const_iterator PI = PersonalityLocs.begin(),
143                               PE = PersonalityLocs.end(),
144                               PII = PersonalityIndexLocs.begin(),
145                               PIE = PersonalityIndexLocs.end();
146          PI != PE || PII != PIE;) {
147       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
148         Parser.Note(*PI++, ".personality was specified here");
149       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
150         Parser.Note(*PII++, ".personalityindex was specified here");
151       else
152         llvm_unreachable(".personality and .personalityindex cannot be "
153                          "at the same location");
154     }
155   }
156 
157   void reset() {
158     FnStartLocs = Locs();
159     CantUnwindLocs = Locs();
160     PersonalityLocs = Locs();
161     HandlerDataLocs = Locs();
162     PersonalityIndexLocs = Locs();
163     FPReg = ARM::SP;
164   }
165 };
166 
167 
168 class ARMAsmParser : public MCTargetAsmParser {
169   const MCRegisterInfo *MRI;
170   UnwindContext UC;
171 
172   ARMTargetStreamer &getTargetStreamer() {
173     assert(getParser().getStreamer().getTargetStreamer() &&
174            "do not have a target streamer");
175     MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
176     return static_cast<ARMTargetStreamer &>(TS);
177   }
178 
  // Map of register aliases created via the .req directive.
180   StringMap<unsigned> RegisterReqs;
181 
182   bool NextSymbolIsThumb;
183 
184   bool useImplicitITThumb() const {
185     return ImplicitItMode == ImplicitItModeTy::Always ||
186            ImplicitItMode == ImplicitItModeTy::ThumbOnly;
187   }
188 
189   bool useImplicitITARM() const {
190     return ImplicitItMode == ImplicitItModeTy::Always ||
191            ImplicitItMode == ImplicitItModeTy::ARMOnly;
192   }
193 
194   struct {
195     ARMCC::CondCodes Cond;    // Condition for IT block.
196     unsigned Mask:4;          // Condition mask for instructions.
197                               // Starting at first 1 (from lsb).
198                               //   '1'  condition as indicated in IT.
199                               //   '0'  inverse of condition (else).
200                               // Count of instructions in IT block is
201                               // 4 - trailingzeroes(mask)
202                               // Note that this does not have the same encoding
203                               // as in the IT instruction, which also depends
204                               // on the low bit of the condition code.
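                              // For example, the parsed mask for "ITET" is
                              // 0b0110: bit 3 = 0 ('e') for slot 2, bit 2 = 1
                              // ('t') for slot 3, and the trailing 1 at bit 1
                              // terminates the three-slot block.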
205 
206     unsigned CurPosition;     // Current position in parsing of IT
207                               // block. In range [0,4], with 0 being the IT
208                               // instruction itself. Initialized according to
209                               // count of instructions in block.  ~0U if no
210                               // active IT block.
211 
212     bool IsExplicit;          // true  - The IT instruction was present in the
213                               //         input, we should not modify it.
214                               // false - The IT instruction was added
215                               //         implicitly, we can extend it if that
216                               //         would be legal.
217   } ITState;
218 
219   SmallVector<MCInst, 4> PendingConditionalInsts;
220 
221   void flushPendingInstructions(MCStreamer &Out) override {
222     if (!inImplicitITBlock()) {
223       assert(PendingConditionalInsts.size() == 0);
224       return;
225     }
226 
227     // Emit the IT instruction
228     unsigned Mask = getITMaskEncoding();
229     MCInst ITInst;
230     ITInst.setOpcode(ARM::t2IT);
231     ITInst.addOperand(MCOperand::createImm(ITState.Cond));
232     ITInst.addOperand(MCOperand::createImm(Mask));
233     Out.EmitInstruction(ITInst, getSTI());
234 
    // Emit the conditional instructions
236     assert(PendingConditionalInsts.size() <= 4);
237     for (const MCInst &Inst : PendingConditionalInsts) {
238       Out.EmitInstruction(Inst, getSTI());
239     }
240     PendingConditionalInsts.clear();
241 
242     // Clear the IT state
243     ITState.Mask = 0;
244     ITState.CurPosition = ~0U;
245   }
246 
247   bool inITBlock() { return ITState.CurPosition != ~0U; }
248   bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
249   bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }
250 
251   bool lastInITBlock() {
252     return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
253   }
254 
255   void forwardITPosition() {
256     if (!inITBlock()) return;
257     // Move to the next instruction in the IT block, if there is one. If not,
258     // mark the block as done, except for implicit IT blocks, which we leave
259     // open until we find an instruction that can't be added to it.
260     unsigned TZ = countTrailingZeros(ITState.Mask);
261     if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
262       ITState.CurPosition = ~0U; // Done with the IT block after this.
263   }
264 
265   // Rewind the state of the current IT block, removing the last slot from it.
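  // For example, rewinding a two-slot block with Mask = 0b1100 restores the
  // one-slot mask 0b1000.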
266   void rewindImplicitITPosition() {
267     assert(inImplicitITBlock());
268     assert(ITState.CurPosition > 1);
269     ITState.CurPosition--;
270     unsigned TZ = countTrailingZeros(ITState.Mask);
271     unsigned NewMask = 0;
272     NewMask |= ITState.Mask & (0xC << TZ);
273     NewMask |= 0x2 << TZ;
274     ITState.Mask = NewMask;
275   }
276 
277   // Rewind the state of the current IT block, removing the last slot from it.
278   // If we were at the first slot, this closes the IT block.
279   void discardImplicitITBlock() {
280     assert(inImplicitITBlock());
281     assert(ITState.CurPosition == 1);
282     ITState.CurPosition = ~0U;
283   }
284 
285   // Return the low-subreg of a given Q register.
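  // For example, Q1 maps to D2.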
286   unsigned getDRegFromQReg(unsigned QReg) const {
287     return MRI->getSubReg(QReg, ARM::dsub_0);
288   }
289 
290   // Get the encoding of the IT mask, as it will appear in an IT instruction.
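  // For conditions with an even code (e.g. EQ), the 't'/'e' bits above the
  // trailing 1 are inverted relative to the parsed format; e.g. "ITET EQ"
  // with parsed mask 0b0110 is encoded as 0b1010.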
291   unsigned getITMaskEncoding() {
292     assert(inITBlock());
293     unsigned Mask = ITState.Mask;
294     unsigned TZ = countTrailingZeros(Mask);
295     if ((ITState.Cond & 1) == 0) {
296       assert(Mask && TZ <= 3 && "illegal IT mask value!");
297       Mask ^= (0xE << TZ) & 0xF;
298     }
299     return Mask;
300   }
301 
302   // Get the condition code corresponding to the current IT block slot.
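  // The first slot always uses the block's condition; later slots consult the
  // mask bit at position (5 - CurPosition).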
303   ARMCC::CondCodes currentITCond() {
304     unsigned MaskBit;
305     if (ITState.CurPosition == 1)
306       MaskBit = 1;
307     else
308       MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
309 
310     return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
311   }
312 
313   // Invert the condition of the current IT block slot without changing any
314   // other slots in the same block.
315   void invertCurrentITCondition() {
316     if (ITState.CurPosition == 1) {
317       ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
318     } else {
319       ITState.Mask ^= 1 << (5 - ITState.CurPosition);
320     }
321   }
322 
323   // Returns true if the current IT block is full (all 4 slots used).
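  // In that case the terminating 1 of the mask has reached bit 0.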
324   bool isITBlockFull() {
325     return inITBlock() && (ITState.Mask & 1);
326   }
327 
328   // Extend the current implicit IT block to have one more slot with the given
329   // condition code.
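  // For example, extending a one-slot block (Mask = 0b1000) with the same
  // condition yields 0b1100, and with the opposite condition yields 0b0100.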
330   void extendImplicitITBlock(ARMCC::CondCodes Cond) {
331     assert(inImplicitITBlock());
332     assert(!isITBlockFull());
333     assert(Cond == ITState.Cond ||
334            Cond == ARMCC::getOppositeCondition(ITState.Cond));
335     unsigned TZ = countTrailingZeros(ITState.Mask);
336     unsigned NewMask = 0;
337     // Keep any existing condition bits.
338     NewMask |= ITState.Mask & (0xE << TZ);
339     // Insert the new condition bit.
340     NewMask |= (Cond == ITState.Cond) << TZ;
341     // Move the trailing 1 down one bit.
342     NewMask |= 1 << (TZ - 1);
343     ITState.Mask = NewMask;
344   }
345 
346   // Create a new implicit IT block with a dummy condition code.
347   void startImplicitITBlock() {
348     assert(!inITBlock());
349     ITState.Cond = ARMCC::AL;
350     ITState.Mask = 8;
351     ITState.CurPosition = 1;
352     ITState.IsExplicit = false;
353   }
354 
355   // Create a new explicit IT block with the given condition and mask. The mask
356   // should be in the parsed format, with a 1 implying 't', regardless of the
357   // low bit of the condition.
358   void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
359     assert(!inITBlock());
360     ITState.Cond = Cond;
361     ITState.Mask = Mask;
362     ITState.CurPosition = 0;
363     ITState.IsExplicit = true;
364   }
365 
366   void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
367     return getParser().Note(L, Msg, Range);
368   }
369 
370   bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
371     return getParser().Warning(L, Msg, Range);
372   }
373 
374   bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
375     return getParser().Error(L, Msg, Range);
376   }
377 
378   bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
379                            unsigned ListNo, bool IsARPop = false);
380   bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
381                            unsigned ListNo);
382 
383   int tryParseRegister();
384   bool tryParseRegisterWithWriteBack(OperandVector &);
385   int tryParseShiftRegister(OperandVector &);
386   bool parseRegisterList(OperandVector &);
387   bool parseMemory(OperandVector &);
388   bool parseOperand(OperandVector &, StringRef Mnemonic);
389   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
390   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
391                               unsigned &ShiftAmount);
392   bool parseLiteralValues(unsigned Size, SMLoc L);
393   bool parseDirectiveThumb(SMLoc L);
394   bool parseDirectiveARM(SMLoc L);
395   bool parseDirectiveThumbFunc(SMLoc L);
396   bool parseDirectiveCode(SMLoc L);
397   bool parseDirectiveSyntax(SMLoc L);
398   bool parseDirectiveReq(StringRef Name, SMLoc L);
399   bool parseDirectiveUnreq(SMLoc L);
400   bool parseDirectiveArch(SMLoc L);
401   bool parseDirectiveEabiAttr(SMLoc L);
402   bool parseDirectiveCPU(SMLoc L);
403   bool parseDirectiveFPU(SMLoc L);
404   bool parseDirectiveFnStart(SMLoc L);
405   bool parseDirectiveFnEnd(SMLoc L);
406   bool parseDirectiveCantUnwind(SMLoc L);
407   bool parseDirectivePersonality(SMLoc L);
408   bool parseDirectiveHandlerData(SMLoc L);
409   bool parseDirectiveSetFP(SMLoc L);
410   bool parseDirectivePad(SMLoc L);
411   bool parseDirectiveRegSave(SMLoc L, bool IsVector);
412   bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
413   bool parseDirectiveLtorg(SMLoc L);
414   bool parseDirectiveEven(SMLoc L);
415   bool parseDirectivePersonalityIndex(SMLoc L);
416   bool parseDirectiveUnwindRaw(SMLoc L);
417   bool parseDirectiveTLSDescSeq(SMLoc L);
418   bool parseDirectiveMovSP(SMLoc L);
419   bool parseDirectiveObjectArch(SMLoc L);
420   bool parseDirectiveArchExtension(SMLoc L);
421   bool parseDirectiveAlign(SMLoc L);
422   bool parseDirectiveThumbSet(SMLoc L);
423 
424   StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
425                           bool &CarrySetting, unsigned &ProcessorIMod,
426                           StringRef &ITMask);
427   void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
428                              bool &CanAcceptCarrySet,
429                              bool &CanAcceptPredicationCode);
430 
431   void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
432                                      OperandVector &Operands);
433   bool isThumb() const {
434     // FIXME: Can tablegen auto-generate this?
435     return getSTI().getFeatureBits()[ARM::ModeThumb];
436   }
437 
438   bool isThumbOne() const {
439     return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
440   }
441 
442   bool isThumbTwo() const {
443     return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
444   }
445 
446   bool hasThumb() const {
447     return getSTI().getFeatureBits()[ARM::HasV4TOps];
448   }
449 
450   bool hasThumb2() const {
451     return getSTI().getFeatureBits()[ARM::FeatureThumb2];
452   }
453 
454   bool hasV6Ops() const {
455     return getSTI().getFeatureBits()[ARM::HasV6Ops];
456   }
457 
458   bool hasV6T2Ops() const {
459     return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
460   }
461 
462   bool hasV6MOps() const {
463     return getSTI().getFeatureBits()[ARM::HasV6MOps];
464   }
465 
466   bool hasV7Ops() const {
467     return getSTI().getFeatureBits()[ARM::HasV7Ops];
468   }
469 
470   bool hasV8Ops() const {
471     return getSTI().getFeatureBits()[ARM::HasV8Ops];
472   }
473 
474   bool hasV8MBaseline() const {
475     return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
476   }
477 
478   bool hasV8MMainline() const {
479     return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
480   }
481 
482   bool has8MSecExt() const {
483     return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
484   }
485 
486   bool hasARM() const {
487     return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
488   }
489 
490   bool hasDSP() const {
491     return getSTI().getFeatureBits()[ARM::FeatureDSP];
492   }
493 
494   bool hasD16() const {
495     return getSTI().getFeatureBits()[ARM::FeatureD16];
496   }
497 
498   bool hasV8_1aOps() const {
499     return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
500   }
501 
502   bool hasRAS() const {
503     return getSTI().getFeatureBits()[ARM::FeatureRAS];
504   }
505 
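  // Toggle between ARM and Thumb mode and recompute the available features
  // for the new mode.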
506   void SwitchMode() {
507     MCSubtargetInfo &STI = copySTI();
508     auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
509     setAvailableFeatures(FB);
510   }
511 
512   void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);
513 
514   bool isMClass() const {
515     return getSTI().getFeatureBits()[ARM::FeatureMClass];
516   }
517 
518   /// @name Auto-generated Match Functions
519   /// {
520 
521 #define GET_ASSEMBLER_HEADER
522 #include "ARMGenAsmMatcher.inc"
523 
524   /// }
525 
526   OperandMatchResultTy parseITCondCode(OperandVector &);
527   OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
528   OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
529   OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
530   OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
531   OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &);
532   OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
533   OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
534   OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
535   OperandMatchResultTy parseBankedRegOperand(OperandVector &);
536   OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
537                                    int High);
538   OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
539     return parsePKHImm(O, "lsl", 0, 31);
540   }
541   OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
542     return parsePKHImm(O, "asr", 1, 32);
543   }
544   OperandMatchResultTy parseSetEndImm(OperandVector &);
545   OperandMatchResultTy parseShifterImm(OperandVector &);
546   OperandMatchResultTy parseRotImm(OperandVector &);
547   OperandMatchResultTy parseModImm(OperandVector &);
548   OperandMatchResultTy parseBitfield(OperandVector &);
549   OperandMatchResultTy parsePostIdxReg(OperandVector &);
550   OperandMatchResultTy parseAM3Offset(OperandVector &);
551   OperandMatchResultTy parseFPImm(OperandVector &);
552   OperandMatchResultTy parseVectorList(OperandVector &);
553   OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
554                                        SMLoc &EndLoc);
555 
556   // Asm Match Converter Methods
557   void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
558   void cvtThumbBranches(MCInst &Inst, const OperandVector &);
559 
560   bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops,
                          MCStreamer &Out);
562   bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
563   bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
564   bool isITBlockTerminator(MCInst &Inst) const;
565   void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);
566   bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands,
567                         bool Load, bool ARMMode, bool Writeback);
568 
569 public:
570   enum ARMMatchResultTy {
571     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
572     Match_RequiresNotITBlock,
573     Match_RequiresV6,
574     Match_RequiresThumb2,
575     Match_RequiresV8,
576     Match_RequiresFlagSetting,
577 #define GET_OPERAND_DIAGNOSTIC_TYPES
578 #include "ARMGenAsmMatcher.inc"
579 
580   };
581 
582   ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
583                const MCInstrInfo &MII, const MCTargetOptions &Options)
584     : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
585     MCAsmParserExtension::Initialize(Parser);
586 
587     // Cache the MCRegisterInfo.
588     MRI = getContext().getRegisterInfo();
589 
590     // Initialize the set of available features.
591     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
592 
593     // Add build attributes based on the selected target.
594     if (AddBuildAttributes)
595       getTargetStreamer().emitTargetAttributes(STI);
596 
597     // Not in an ITBlock to start with.
598     ITState.CurPosition = ~0U;
599 
600     NextSymbolIsThumb = false;
601   }
602 
603   // Implementation of the MCTargetAsmParser interface:
604   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
605   bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
606                         SMLoc NameLoc, OperandVector &Operands) override;
607   bool ParseDirective(AsmToken DirectiveID) override;
608 
609   unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
610                                       unsigned Kind) override;
611   unsigned checkTargetMatchPredicate(MCInst &Inst) override;
612 
613   bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
614                                OperandVector &Operands, MCStreamer &Out,
615                                uint64_t &ErrorInfo,
616                                bool MatchingInlineAsm) override;
617   unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
618                             SmallVectorImpl<NearMissInfo> &NearMisses,
619                             bool MatchingInlineAsm, bool &EmitInITBlock,
620                             MCStreamer &Out);
621 
622   struct NearMissMessage {
623     SMLoc Loc;
624     SmallString<128> Message;
625   };
626 
627   const char *getCustomOperandDiag(ARMMatchResultTy MatchError);
628 
629   void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
630                         SmallVectorImpl<NearMissMessage> &NearMissesOut,
631                         SMLoc IDLoc, OperandVector &Operands);
632   void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
633                         OperandVector &Operands);
634 
635   void doBeforeLabelEmit(MCSymbol *Symbol) override;
636 
637   void onLabelParsed(MCSymbol *Symbol) override;
638 };
639 
640 /// ARMOperand - Instances of this class represent a parsed ARM machine
641 /// operand.
642 class ARMOperand : public MCParsedAsmOperand {
643   enum KindTy {
644     k_CondCode,
645     k_CCOut,
646     k_ITCondMask,
647     k_CoprocNum,
648     k_CoprocReg,
649     k_CoprocOption,
650     k_Immediate,
651     k_MemBarrierOpt,
652     k_InstSyncBarrierOpt,
653     k_TraceSyncBarrierOpt,
654     k_Memory,
655     k_PostIndexRegister,
656     k_MSRMask,
657     k_BankedReg,
658     k_ProcIFlags,
659     k_VectorIndex,
660     k_Register,
661     k_RegisterList,
662     k_DPRRegisterList,
663     k_SPRRegisterList,
664     k_VectorList,
665     k_VectorListAllLanes,
666     k_VectorListIndexed,
667     k_ShiftedRegister,
668     k_ShiftedImmediate,
669     k_ShifterImmediate,
670     k_RotateImmediate,
671     k_ModifiedImmediate,
672     k_ConstantPoolImmediate,
673     k_BitfieldDescriptor,
674     k_Token,
675   } Kind;
676 
677   SMLoc StartLoc, EndLoc, AlignmentLoc;
678   SmallVector<unsigned, 8> Registers;
679 
680   struct CCOp {
681     ARMCC::CondCodes Val;
682   };
683 
684   struct CopOp {
685     unsigned Val;
686   };
687 
688   struct CoprocOptionOp {
689     unsigned Val;
690   };
691 
692   struct ITMaskOp {
693     unsigned Mask:4;
694   };
695 
696   struct MBOptOp {
697     ARM_MB::MemBOpt Val;
698   };
699 
700   struct ISBOptOp {
701     ARM_ISB::InstSyncBOpt Val;
702   };
703 
704   struct TSBOptOp {
705     ARM_TSB::TraceSyncBOpt Val;
706   };
707 
708   struct IFlagsOp {
709     ARM_PROC::IFlags Val;
710   };
711 
712   struct MMaskOp {
713     unsigned Val;
714   };
715 
716   struct BankedRegOp {
717     unsigned Val;
718   };
719 
720   struct TokOp {
721     const char *Data;
722     unsigned Length;
723   };
724 
725   struct RegOp {
726     unsigned RegNum;
727   };
728 
729   // A vector register list is a sequential list of 1 to 4 registers.
730   struct VectorListOp {
731     unsigned RegNum;
732     unsigned Count;
733     unsigned LaneIndex;
734     bool isDoubleSpaced;
735   };
736 
737   struct VectorIndexOp {
738     unsigned Val;
739   };
740 
741   struct ImmOp {
742     const MCExpr *Val;
743   };
744 
745   /// Combined record for all forms of ARM address expressions.
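  /// For example, "[r0, r1, lsl #2]" is represented with BaseRegNum = R0,
  /// OffsetRegNum = R1, ShiftType = lsl and ShiftImm = 2, with OffsetImm
  /// left null.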
746   struct MemoryOp {
747     unsigned BaseRegNum;
748     // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
749     // was specified.
750     const MCConstantExpr *OffsetImm;  // Offset immediate value
751     unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
752     ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
753     unsigned ShiftImm;        // shift for OffsetReg.
754     unsigned Alignment;       // 0 = no alignment specified
755     // n = alignment in bytes (2, 4, 8, 16, or 32)
756     unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
757   };
758 
759   struct PostIdxRegOp {
760     unsigned RegNum;
761     bool isAdd;
762     ARM_AM::ShiftOpc ShiftTy;
763     unsigned ShiftImm;
764   };
765 
766   struct ShifterImmOp {
767     bool isASR;
768     unsigned Imm;
769   };
770 
771   struct RegShiftedRegOp {
772     ARM_AM::ShiftOpc ShiftTy;
773     unsigned SrcReg;
774     unsigned ShiftReg;
775     unsigned ShiftImm;
776   };
777 
778   struct RegShiftedImmOp {
779     ARM_AM::ShiftOpc ShiftTy;
780     unsigned SrcReg;
781     unsigned ShiftImm;
782   };
783 
784   struct RotImmOp {
785     unsigned Imm;
786   };
787 
788   struct ModImmOp {
789     unsigned Bits;
790     unsigned Rot;
791   };
792 
793   struct BitfieldOp {
794     unsigned LSB;
795     unsigned Width;
796   };
797 
798   union {
799     struct CCOp CC;
800     struct CopOp Cop;
801     struct CoprocOptionOp CoprocOption;
802     struct MBOptOp MBOpt;
803     struct ISBOptOp ISBOpt;
804     struct TSBOptOp TSBOpt;
805     struct ITMaskOp ITMask;
806     struct IFlagsOp IFlags;
807     struct MMaskOp MMask;
808     struct BankedRegOp BankedReg;
809     struct TokOp Tok;
810     struct RegOp Reg;
811     struct VectorListOp VectorList;
812     struct VectorIndexOp VectorIndex;
813     struct ImmOp Imm;
814     struct MemoryOp Memory;
815     struct PostIdxRegOp PostIdxReg;
816     struct ShifterImmOp ShifterImm;
817     struct RegShiftedRegOp RegShiftedReg;
818     struct RegShiftedImmOp RegShiftedImm;
819     struct RotImmOp RotImm;
820     struct ModImmOp ModImm;
821     struct BitfieldOp Bitfield;
822   };
823 
824 public:
825   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
826 
827   /// getStartLoc - Get the location of the first token of this operand.
828   SMLoc getStartLoc() const override { return StartLoc; }
829 
830   /// getEndLoc - Get the location of the last token of this operand.
831   SMLoc getEndLoc() const override { return EndLoc; }
832 
833   /// getLocRange - Get the range between the first and last token of this
834   /// operand.
835   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
836 
837   /// getAlignmentLoc - Get the location of the Alignment token of this operand.
838   SMLoc getAlignmentLoc() const {
839     assert(Kind == k_Memory && "Invalid access!");
840     return AlignmentLoc;
841   }
842 
843   ARMCC::CondCodes getCondCode() const {
844     assert(Kind == k_CondCode && "Invalid access!");
845     return CC.Val;
846   }
847 
848   unsigned getCoproc() const {
849     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
850     return Cop.Val;
851   }
852 
853   StringRef getToken() const {
854     assert(Kind == k_Token && "Invalid access!");
855     return StringRef(Tok.Data, Tok.Length);
856   }
857 
858   unsigned getReg() const override {
859     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
860     return Reg.RegNum;
861   }
862 
863   const SmallVectorImpl<unsigned> &getRegList() const {
864     assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
865             Kind == k_SPRRegisterList) && "Invalid access!");
866     return Registers;
867   }
868 
869   const MCExpr *getImm() const {
870     assert(isImm() && "Invalid access!");
871     return Imm.Val;
872   }
873 
874   const MCExpr *getConstantPoolImm() const {
875     assert(isConstantPoolImm() && "Invalid access!");
876     return Imm.Val;
877   }
878 
879   unsigned getVectorIndex() const {
880     assert(Kind == k_VectorIndex && "Invalid access!");
881     return VectorIndex.Val;
882   }
883 
884   ARM_MB::MemBOpt getMemBarrierOpt() const {
885     assert(Kind == k_MemBarrierOpt && "Invalid access!");
886     return MBOpt.Val;
887   }
888 
889   ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
890     assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
891     return ISBOpt.Val;
892   }
893 
894   ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
895     assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
896     return TSBOpt.Val;
897   }
898 
899   ARM_PROC::IFlags getProcIFlags() const {
900     assert(Kind == k_ProcIFlags && "Invalid access!");
901     return IFlags.Val;
902   }
903 
904   unsigned getMSRMask() const {
905     assert(Kind == k_MSRMask && "Invalid access!");
906     return MMask.Val;
907   }
908 
909   unsigned getBankedReg() const {
910     assert(Kind == k_BankedReg && "Invalid access!");
911     return BankedReg.Val;
912   }
913 
914   bool isCoprocNum() const { return Kind == k_CoprocNum; }
915   bool isCoprocReg() const { return Kind == k_CoprocReg; }
916   bool isCoprocOption() const { return Kind == k_CoprocOption; }
917   bool isCondCode() const { return Kind == k_CondCode; }
918   bool isCCOut() const { return Kind == k_CCOut; }
919   bool isITMask() const { return Kind == k_ITCondMask; }
920   bool isITCondCode() const { return Kind == k_CondCode; }
921   bool isImm() const override {
922     return Kind == k_Immediate;
923   }
924 
925   bool isARMBranchTarget() const {
926     if (!isImm()) return false;
927 
928     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
929       return CE->getValue() % 4 == 0;
930     return true;
931   }
932 
933 
934   bool isThumbBranchTarget() const {
935     if (!isImm()) return false;
936 
937     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
938       return CE->getValue() % 2 == 0;
939     return true;
940   }
941 
  // Checks whether this operand is an unsigned offset which fits in a field
  // of the specified width, scaled by a specific number of bits.
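  // For example, isUnsignedOffset<8, 2>() accepts multiples of 4 in [0, 1020].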
944   template<unsigned width, unsigned scale>
945   bool isUnsignedOffset() const {
946     if (!isImm()) return false;
947     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
948     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
949       int64_t Val = CE->getValue();
950       int64_t Align = 1LL << scale;
951       int64_t Max = Align * ((1LL << width) - 1);
952       return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
953     }
954     return false;
955   }
956 
  // Checks whether this operand is a signed offset which fits in a field
  // of the specified width, scaled by a specific number of bits.
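  // For example, isSignedOffset<8, 2>() accepts multiples of 4 in [-512, 508].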
959   template<unsigned width, unsigned scale>
960   bool isSignedOffset() const {
961     if (!isImm()) return false;
962     if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
963     if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
964       int64_t Val = CE->getValue();
965       int64_t Align = 1LL << scale;
966       int64_t Max = Align * ((1LL << (width-1)) - 1);
967       int64_t Min = -Align * (1LL << (width-1));
968       return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
969     }
970     return false;
971   }
972 
  // Checks whether this operand is a memory operand computed as an offset
  // applied to PC. The offset may have 8 bits of magnitude and is represented
  // with two bits of shift. Textually it may be either [pc, #imm], #imm or a
  // relocatable expression.
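  // For example, "[pc, #64]", "#64" or a label reference.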
977   bool isThumbMemPC() const {
978     int64_t Val = 0;
979     if (isImm()) {
980       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
981       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
982       if (!CE) return false;
983       Val = CE->getValue();
984     }
985     else if (isMem()) {
986       if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
987       if(Memory.BaseRegNum != ARM::PC) return false;
988       Val = Memory.OffsetImm->getValue();
989     }
990     else return false;
991     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
992   }
993 
994   bool isFPImm() const {
995     if (!isImm()) return false;
996     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
997     if (!CE) return false;
998     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
999     return Val != -1;
1000   }
1001 
1002   template<int64_t N, int64_t M>
1003   bool isImmediate() const {
1004     if (!isImm()) return false;
1005     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1006     if (!CE) return false;
1007     int64_t Value = CE->getValue();
1008     return Value >= N && Value <= M;
1009   }
1010 
1011   template<int64_t N, int64_t M>
1012   bool isImmediateS4() const {
1013     if (!isImm()) return false;
1014     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1015     if (!CE) return false;
1016     int64_t Value = CE->getValue();
1017     return ((Value & 3) == 0) && Value >= N && Value <= M;
1018   }
1019 
1020   bool isFBits16() const {
1021     return isImmediate<0, 17>();
1022   }
1023   bool isFBits32() const {
1024     return isImmediate<1, 33>();
1025   }
1026   bool isImm8s4() const {
1027     return isImmediateS4<-1020, 1020>();
1028   }
1029   bool isImm0_1020s4() const {
1030     return isImmediateS4<0, 1020>();
1031   }
1032   bool isImm0_508s4() const {
1033     return isImmediateS4<0, 508>();
1034   }
1035   bool isImm0_508s4Neg() const {
1036     if (!isImm()) return false;
1037     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1038     if (!CE) return false;
1039     int64_t Value = -CE->getValue();
    // Explicitly exclude zero. We want that to use the normal 0_508 version.
1041     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
1042   }
1043 
1044   bool isImm0_4095Neg() const {
1045     if (!isImm()) return false;
1046     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1047     if (!CE) return false;
1048     // isImm0_4095Neg is used with 32-bit immediates only.
1049     // 32-bit immediates are zero extended to 64-bit when parsed,
1050     // thus simple -CE->getValue() results in a big negative number,
1051     // not a small positive number as intended
1052     if ((CE->getValue() >> 32) > 0) return false;
1053     uint32_t Value = -static_cast<uint32_t>(CE->getValue());
1054     return Value > 0 && Value < 4096;
1055   }
1056 
1057   bool isImm0_7() const {
1058     return isImmediate<0, 7>();
1059   }
1060 
1061   bool isImm1_16() const {
1062     return isImmediate<1, 16>();
1063   }
1064 
1065   bool isImm1_32() const {
1066     return isImmediate<1, 32>();
1067   }
1068 
1069   bool isImm8_255() const {
1070     return isImmediate<8, 255>();
1071   }
1072 
1073   bool isImm256_65535Expr() const {
1074     if (!isImm()) return false;
1075     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1076     // If it's not a constant expression, it'll generate a fixup and be
1077     // handled later.
1078     if (!CE) return true;
1079     int64_t Value = CE->getValue();
1080     return Value >= 256 && Value < 65536;
1081   }
1082 
1083   bool isImm0_65535Expr() const {
1084     if (!isImm()) return false;
1085     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1086     // If it's not a constant expression, it'll generate a fixup and be
1087     // handled later.
1088     if (!CE) return true;
1089     int64_t Value = CE->getValue();
1090     return Value >= 0 && Value < 65536;
1091   }
1092 
1093   bool isImm24bit() const {
1094     return isImmediate<0, 0xffffff + 1>();
1095   }
1096 
1097   bool isImmThumbSR() const {
1098     return isImmediate<1, 33>();
1099   }
1100 
1101   bool isPKHLSLImm() const {
1102     return isImmediate<0, 32>();
1103   }
1104 
1105   bool isPKHASRImm() const {
1106     return isImmediate<0, 33>();
1107   }
1108 
1109   bool isAdrLabel() const {
1110     // If we have an immediate that's not a constant, treat it as a label
1111     // reference needing a fixup.
1112     if (isImm() && !isa<MCConstantExpr>(getImm()))
1113       return true;
1114 
1115     // If it is a constant, it must fit into a modified immediate encoding.
1116     if (!isImm()) return false;
1117     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1118     if (!CE) return false;
1119     int64_t Value = CE->getValue();
1120     return (ARM_AM::getSOImmVal(Value) != -1 ||
1121             ARM_AM::getSOImmVal(-Value) != -1);
1122   }
1123 
1124   bool isT2SOImm() const {
1125     // If we have an immediate that's not a constant, treat it as an expression
1126     // needing a fixup.
1127     if (isImm() && !isa<MCConstantExpr>(getImm())) {
1128       // We want to avoid matching :upper16: and :lower16: as we want these
1129       // expressions to match in isImm0_65535Expr()
1130       const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
1131       return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
1132                              ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
1133     }
1134     if (!isImm()) return false;
1135     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1136     if (!CE) return false;
1137     int64_t Value = CE->getValue();
1138     return ARM_AM::getT2SOImmVal(Value) != -1;
1139   }
1140 
1141   bool isT2SOImmNot() const {
1142     if (!isImm()) return false;
1143     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1144     if (!CE) return false;
1145     int64_t Value = CE->getValue();
1146     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1147       ARM_AM::getT2SOImmVal(~Value) != -1;
1148   }
1149 
1150   bool isT2SOImmNeg() const {
1151     if (!isImm()) return false;
1152     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1153     if (!CE) return false;
1154     int64_t Value = CE->getValue();
1155     // Only use this when not representable as a plain so_imm.
1156     return ARM_AM::getT2SOImmVal(Value) == -1 &&
1157       ARM_AM::getT2SOImmVal(-Value) != -1;
1158   }
1159 
1160   bool isSetEndImm() const {
1161     if (!isImm()) return false;
1162     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1163     if (!CE) return false;
1164     int64_t Value = CE->getValue();
1165     return Value == 1 || Value == 0;
1166   }
1167 
1168   bool isReg() const override { return Kind == k_Register; }
1169   bool isRegList() const { return Kind == k_RegisterList; }
1170   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
1171   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
1172   bool isToken() const override { return Kind == k_Token; }
1173   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
1174   bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
1175   bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
1176   bool isMem() const override {
1177     if (Kind != k_Memory)
1178       return false;
1179     if (Memory.BaseRegNum &&
1180         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
1181       return false;
1182     if (Memory.OffsetRegNum &&
1183         !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
1184       return false;
1185     return true;
1186   }
1187   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
1188   bool isRegShiftedReg() const {
1189     return Kind == k_ShiftedRegister &&
1190            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1191                RegShiftedReg.SrcReg) &&
1192            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1193                RegShiftedReg.ShiftReg);
1194   }
1195   bool isRegShiftedImm() const {
1196     return Kind == k_ShiftedImmediate &&
1197            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
1198                RegShiftedImm.SrcReg);
1199   }
1200   bool isRotImm() const { return Kind == k_RotateImmediate; }
1201   bool isModImm() const { return Kind == k_ModifiedImmediate; }
1202 
1203   bool isModImmNot() const {
1204     if (!isImm()) return false;
1205     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1206     if (!CE) return false;
1207     int64_t Value = CE->getValue();
1208     return ARM_AM::getSOImmVal(~Value) != -1;
1209   }
1210 
1211   bool isModImmNeg() const {
1212     if (!isImm()) return false;
1213     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1214     if (!CE) return false;
1215     int64_t Value = CE->getValue();
1216     return ARM_AM::getSOImmVal(Value) == -1 &&
1217       ARM_AM::getSOImmVal(-Value) != -1;
1218   }
1219 
1220   bool isThumbModImmNeg1_7() const {
1221     if (!isImm()) return false;
1222     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1223     if (!CE) return false;
1224     int32_t Value = -(int32_t)CE->getValue();
1225     return 0 < Value && Value < 8;
1226   }
1227 
1228   bool isThumbModImmNeg8_255() const {
1229     if (!isImm()) return false;
1230     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1231     if (!CE) return false;
1232     int32_t Value = -(int32_t)CE->getValue();
1233     return 7 < Value && Value < 256;
1234   }
1235 
1236   bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
1237   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
1238   bool isPostIdxRegShifted() const {
1239     return Kind == k_PostIndexRegister &&
1240            ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
1241   }
1242   bool isPostIdxReg() const {
1243     return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
1244   }
1245   bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
1246     if (!isMem())
1247       return false;
1248     // No offset of any kind.
1249     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
1250      (alignOK || Memory.Alignment == Alignment);
1251   }
1252   bool isMemPCRelImm12() const {
1253     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1254       return false;
1255     // Base register must be PC.
1256     if (Memory.BaseRegNum != ARM::PC)
1257       return false;
1258     // Immediate offset in range [-4095, 4095].
1259     if (!Memory.OffsetImm) return true;
1260     int64_t Val = Memory.OffsetImm->getValue();
1261     return (Val > -4096 && Val < 4096) ||
1262            (Val == std::numeric_limits<int32_t>::min());
1263   }
1264 
1265   bool isAlignedMemory() const {
1266     return isMemNoOffset(true);
1267   }
1268 
1269   bool isAlignedMemoryNone() const {
1270     return isMemNoOffset(false, 0);
1271   }
1272 
1273   bool isDupAlignedMemoryNone() const {
1274     return isMemNoOffset(false, 0);
1275   }
1276 
1277   bool isAlignedMemory16() const {
1278     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1279       return true;
1280     return isMemNoOffset(false, 0);
1281   }
1282 
1283   bool isDupAlignedMemory16() const {
1284     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1285       return true;
1286     return isMemNoOffset(false, 0);
1287   }
1288 
1289   bool isAlignedMemory32() const {
1290     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1291       return true;
1292     return isMemNoOffset(false, 0);
1293   }
1294 
1295   bool isDupAlignedMemory32() const {
1296     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1297       return true;
1298     return isMemNoOffset(false, 0);
1299   }
1300 
1301   bool isAlignedMemory64() const {
1302     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1303       return true;
1304     return isMemNoOffset(false, 0);
1305   }
1306 
1307   bool isDupAlignedMemory64() const {
1308     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1309       return true;
1310     return isMemNoOffset(false, 0);
1311   }
1312 
1313   bool isAlignedMemory64or128() const {
1314     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1315       return true;
1316     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1317       return true;
1318     return isMemNoOffset(false, 0);
1319   }
1320 
1321   bool isDupAlignedMemory64or128() const {
1322     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1323       return true;
1324     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1325       return true;
1326     return isMemNoOffset(false, 0);
1327   }
1328 
1329   bool isAlignedMemory64or128or256() const {
1330     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1331       return true;
1332     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1333       return true;
1334     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1335       return true;
1336     return isMemNoOffset(false, 0);
1337   }
1338 
1339   bool isAddrMode2() const {
1340     if (!isMem() || Memory.Alignment != 0) return false;
1341     // Check for register offset.
1342     if (Memory.OffsetRegNum) return true;
1343     // Immediate offset in range [-4095, 4095].
1344     if (!Memory.OffsetImm) return true;
1345     int64_t Val = Memory.OffsetImm->getValue();
1346     return Val > -4096 && Val < 4096;
1347   }
1348 
1349   bool isAM2OffsetImm() const {
1350     if (!isImm()) return false;
1351     // Immediate offset in range [-4095, 4095].
1352     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1353     if (!CE) return false;
1354     int64_t Val = CE->getValue();
1355     return (Val == std::numeric_limits<int32_t>::min()) ||
1356            (Val > -4096 && Val < 4096);
1357   }
1358 
1359   bool isAddrMode3() const {
1360     // If we have an immediate that's not a constant, treat it as a label
1361     // reference needing a fixup. If it is a constant, it's something else
1362     // and we reject it.
1363     if (isImm() && !isa<MCConstantExpr>(getImm()))
1364       return true;
1365     if (!isMem() || Memory.Alignment != 0) return false;
1366     // No shifts are legal for AM3.
1367     if (Memory.ShiftType != ARM_AM::no_shift) return false;
1368     // Check for register offset.
1369     if (Memory.OffsetRegNum) return true;
1370     // Immediate offset in range [-255, 255].
1371     if (!Memory.OffsetImm) return true;
1372     int64_t Val = Memory.OffsetImm->getValue();
1373     // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
1374     // have to check for this too.
1375     return (Val > -256 && Val < 256) ||
1376            Val == std::numeric_limits<int32_t>::min();
1377   }
1378 
1379   bool isAM3Offset() const {
1380     if (isPostIdxReg())
1381       return true;
1382     if (!isImm())
1383       return false;
1384     // Immediate offset in range [-255, 255].
1385     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1386     if (!CE) return false;
1387     int64_t Val = CE->getValue();
1388     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1389     return (Val > -256 && Val < 256) ||
1390            Val == std::numeric_limits<int32_t>::min();
1391   }
1392 
1393   bool isAddrMode5() const {
1394     // If we have an immediate that's not a constant, treat it as a label
1395     // reference needing a fixup. If it is a constant, it's something else
1396     // and we reject it.
1397     if (isImm() && !isa<MCConstantExpr>(getImm()))
1398       return true;
1399     if (!isMem() || Memory.Alignment != 0) return false;
1400     // Check for register offset.
1401     if (Memory.OffsetRegNum) return false;
1402     // Immediate offset in range [-1020, 1020] and a multiple of 4.
1403     if (!Memory.OffsetImm) return true;
1404     int64_t Val = Memory.OffsetImm->getValue();
1405     return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
1406       Val == std::numeric_limits<int32_t>::min();
1407   }
1408 
1409   bool isAddrMode5FP16() const {
1410     // If we have an immediate that's not a constant, treat it as a label
1411     // reference needing a fixup. If it is a constant, it's something else
1412     // and we reject it.
1413     if (isImm() && !isa<MCConstantExpr>(getImm()))
1414       return true;
1415     if (!isMem() || Memory.Alignment != 0) return false;
1416     // Check for register offset.
1417     if (Memory.OffsetRegNum) return false;
1418     // Immediate offset in range [-510, 510] and a multiple of 2.
1419     if (!Memory.OffsetImm) return true;
1420     int64_t Val = Memory.OffsetImm->getValue();
1421     return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
1422            Val == std::numeric_limits<int32_t>::min();
1423   }
1424 
1425   bool isMemTBB() const {
1426     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1427         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1428       return false;
1429     return true;
1430   }
1431 
1432   bool isMemTBH() const {
1433     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1434         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1435         Memory.Alignment != 0 )
1436       return false;
1437     return true;
1438   }
1439 
1440   bool isMemRegOffset() const {
1441     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1442       return false;
1443     return true;
1444   }
1445 
1446   bool isT2MemRegOffset() const {
1447     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1448         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1449       return false;
1450     // Only lsl #{0, 1, 2, 3} allowed.
1451     if (Memory.ShiftType == ARM_AM::no_shift)
1452       return true;
1453     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1454       return false;
1455     return true;
1456   }
1457 
1458   bool isMemThumbRR() const {
1459     // Thumb reg+reg addressing is simple. Just two registers, a base and
1460     // an offset. No shifts, negations or any other complicating factors.
1461     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1462         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1463       return false;
1464     return isARMLowRegister(Memory.BaseRegNum) &&
1465       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1466   }
1467 
1468   bool isMemThumbRIs4() const {
1469     if (!isMem() || Memory.OffsetRegNum != 0 ||
1470         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1471       return false;
1472     // Immediate offset, multiple of 4 in range [0, 124].
1473     if (!Memory.OffsetImm) return true;
1474     int64_t Val = Memory.OffsetImm->getValue();
1475     return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1476   }
1477 
1478   bool isMemThumbRIs2() const {
1479     if (!isMem() || Memory.OffsetRegNum != 0 ||
1480         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1481       return false;
    // Immediate offset, multiple of 2 in range [0, 62].
1483     if (!Memory.OffsetImm) return true;
1484     int64_t Val = Memory.OffsetImm->getValue();
1485     return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1486   }
1487 
1488   bool isMemThumbRIs1() const {
1489     if (!isMem() || Memory.OffsetRegNum != 0 ||
1490         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1491       return false;
1492     // Immediate offset in range [0, 31].
1493     if (!Memory.OffsetImm) return true;
1494     int64_t Val = Memory.OffsetImm->getValue();
1495     return Val >= 0 && Val <= 31;
1496   }
1497 
1498   bool isMemThumbSPI() const {
1499     if (!isMem() || Memory.OffsetRegNum != 0 ||
1500         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1501       return false;
1502     // Immediate offset, multiple of 4 in range [0, 1020].
1503     if (!Memory.OffsetImm) return true;
1504     int64_t Val = Memory.OffsetImm->getValue();
1505     return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1506   }
1507 
1508   bool isMemImm8s4Offset() const {
1509     // If we have an immediate that's not a constant, treat it as a label
1510     // reference needing a fixup. If it is a constant, it's something else
1511     // and we reject it.
1512     if (isImm() && !isa<MCConstantExpr>(getImm()))
1513       return true;
1514     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1515       return false;
1516     // Immediate offset a multiple of 4 in range [-1020, 1020].
1517     if (!Memory.OffsetImm) return true;
1518     int64_t Val = Memory.OffsetImm->getValue();
1519     // Special case, #-0 is std::numeric_limits<int32_t>::min().
1520     return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
1521            Val == std::numeric_limits<int32_t>::min();
1522   }
1523 
1524   bool isMemImm0_1020s4Offset() const {
1525     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1526       return false;
1527     // Immediate offset a multiple of 4 in range [0, 1020].
1528     if (!Memory.OffsetImm) return true;
1529     int64_t Val = Memory.OffsetImm->getValue();
1530     return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1531   }
1532 
1533   bool isMemImm8Offset() const {
1534     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1535       return false;
1536     // Base reg of PC isn't allowed for these encodings.
1537     if (Memory.BaseRegNum == ARM::PC) return false;
1538     // Immediate offset in range [-255, 255].
1539     if (!Memory.OffsetImm) return true;
1540     int64_t Val = Memory.OffsetImm->getValue();
1541     return (Val == std::numeric_limits<int32_t>::min()) ||
1542            (Val > -256 && Val < 256);
1543   }
1544 
1545   bool isMemPosImm8Offset() const {
1546     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1547       return false;
1548     // Immediate offset in range [0, 255].
1549     if (!Memory.OffsetImm) return true;
1550     int64_t Val = Memory.OffsetImm->getValue();
1551     return Val >= 0 && Val < 256;
1552   }
1553 
1554   bool isMemNegImm8Offset() const {
1555     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1556       return false;
1557     // Base reg of PC isn't allowed for these encodings.
1558     if (Memory.BaseRegNum == ARM::PC) return false;
1559     // Immediate offset in range [-255, -1].
1560     if (!Memory.OffsetImm) return false;
1561     int64_t Val = Memory.OffsetImm->getValue();
1562     return (Val == std::numeric_limits<int32_t>::min()) ||
1563            (Val > -256 && Val < 0);
1564   }
1565 
1566   bool isMemUImm12Offset() const {
1567     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1568       return false;
1569     // Immediate offset in range [0, 4095].
1570     if (!Memory.OffsetImm) return true;
1571     int64_t Val = Memory.OffsetImm->getValue();
1572     return (Val >= 0 && Val < 4096);
1573   }
1574 
1575   bool isMemImm12Offset() const {
1576     // If we have an immediate that's not a constant, treat it as a label
1577     // reference needing a fixup. If it is a constant, it's something else
1578     // and we reject it.
1579 
1580     if (isImm() && !isa<MCConstantExpr>(getImm()))
1581       return true;
1582 
1583     if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1584       return false;
1585     // Immediate offset in range [-4095, 4095].
1586     if (!Memory.OffsetImm) return true;
1587     int64_t Val = Memory.OffsetImm->getValue();
1588     return (Val > -4096 && Val < 4096) ||
1589            (Val == std::numeric_limits<int32_t>::min());
1590   }
1591 
1592   bool isConstPoolAsmImm() const {
    // Delay processing of the constant pool immediate; it will eventually be
    // turned into a constant. Match no other operand kind.
    return isConstantPoolImm();
1596   }
1597 
1598   bool isPostIdxImm8() const {
1599     if (!isImm()) return false;
1600     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1601     if (!CE) return false;
1602     int64_t Val = CE->getValue();
1603     return (Val > -256 && Val < 256) ||
1604            (Val == std::numeric_limits<int32_t>::min());
1605   }
1606 
1607   bool isPostIdxImm8s4() const {
1608     if (!isImm()) return false;
1609     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1610     if (!CE) return false;
1611     int64_t Val = CE->getValue();
1612     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1613            (Val == std::numeric_limits<int32_t>::min());
1614   }
1615 
1616   bool isMSRMask() const { return Kind == k_MSRMask; }
1617   bool isBankedReg() const { return Kind == k_BankedReg; }
1618   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1619 
1620   // NEON operands.
1621   bool isSingleSpacedVectorList() const {
1622     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1623   }
1624 
1625   bool isDoubleSpacedVectorList() const {
1626     return Kind == k_VectorList && VectorList.isDoubleSpaced;
1627   }
1628 
1629   bool isVecListOneD() const {
1630     if (!isSingleSpacedVectorList()) return false;
1631     return VectorList.Count == 1;
1632   }
1633 
1634   bool isVecListDPair() const {
1635     if (!isSingleSpacedVectorList()) return false;
1636     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1637               .contains(VectorList.RegNum));
1638   }
1639 
1640   bool isVecListThreeD() const {
1641     if (!isSingleSpacedVectorList()) return false;
1642     return VectorList.Count == 3;
1643   }
1644 
1645   bool isVecListFourD() const {
1646     if (!isSingleSpacedVectorList()) return false;
1647     return VectorList.Count == 4;
1648   }
1649 
1650   bool isVecListDPairSpaced() const {
1651     if (Kind != k_VectorList) return false;
1652     if (isSingleSpacedVectorList()) return false;
1653     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1654               .contains(VectorList.RegNum));
1655   }
1656 
1657   bool isVecListThreeQ() const {
1658     if (!isDoubleSpacedVectorList()) return false;
1659     return VectorList.Count == 3;
1660   }
1661 
1662   bool isVecListFourQ() const {
1663     if (!isDoubleSpacedVectorList()) return false;
1664     return VectorList.Count == 4;
1665   }
1666 
1667   bool isSingleSpacedVectorAllLanes() const {
1668     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1669   }
1670 
1671   bool isDoubleSpacedVectorAllLanes() const {
1672     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1673   }
1674 
1675   bool isVecListOneDAllLanes() const {
1676     if (!isSingleSpacedVectorAllLanes()) return false;
1677     return VectorList.Count == 1;
1678   }
1679 
1680   bool isVecListDPairAllLanes() const {
1681     if (!isSingleSpacedVectorAllLanes()) return false;
1682     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1683               .contains(VectorList.RegNum));
1684   }
1685 
1686   bool isVecListDPairSpacedAllLanes() const {
1687     if (!isDoubleSpacedVectorAllLanes()) return false;
1688     return VectorList.Count == 2;
1689   }
1690 
1691   bool isVecListThreeDAllLanes() const {
1692     if (!isSingleSpacedVectorAllLanes()) return false;
1693     return VectorList.Count == 3;
1694   }
1695 
1696   bool isVecListThreeQAllLanes() const {
1697     if (!isDoubleSpacedVectorAllLanes()) return false;
1698     return VectorList.Count == 3;
1699   }
1700 
1701   bool isVecListFourDAllLanes() const {
1702     if (!isSingleSpacedVectorAllLanes()) return false;
1703     return VectorList.Count == 4;
1704   }
1705 
1706   bool isVecListFourQAllLanes() const {
1707     if (!isDoubleSpacedVectorAllLanes()) return false;
1708     return VectorList.Count == 4;
1709   }
1710 
1711   bool isSingleSpacedVectorIndexed() const {
1712     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1713   }
1714 
1715   bool isDoubleSpacedVectorIndexed() const {
1716     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1717   }
1718 
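  // For the indexed vector lists the lane index must fit the element width:
  // at most 8 byte lanes, 4 half-word lanes, or 2 word lanes.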
1719   bool isVecListOneDByteIndexed() const {
1720     if (!isSingleSpacedVectorIndexed()) return false;
1721     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1722   }
1723 
1724   bool isVecListOneDHWordIndexed() const {
1725     if (!isSingleSpacedVectorIndexed()) return false;
1726     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1727   }
1728 
1729   bool isVecListOneDWordIndexed() const {
1730     if (!isSingleSpacedVectorIndexed()) return false;
1731     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1732   }
1733 
1734   bool isVecListTwoDByteIndexed() const {
1735     if (!isSingleSpacedVectorIndexed()) return false;
1736     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1737   }
1738 
1739   bool isVecListTwoDHWordIndexed() const {
1740     if (!isSingleSpacedVectorIndexed()) return false;
1741     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1742   }
1743 
1744   bool isVecListTwoQWordIndexed() const {
1745     if (!isDoubleSpacedVectorIndexed()) return false;
1746     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1747   }
1748 
1749   bool isVecListTwoQHWordIndexed() const {
1750     if (!isDoubleSpacedVectorIndexed()) return false;
1751     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1752   }
1753 
1754   bool isVecListTwoDWordIndexed() const {
1755     if (!isSingleSpacedVectorIndexed()) return false;
1756     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1757   }
1758 
1759   bool isVecListThreeDByteIndexed() const {
1760     if (!isSingleSpacedVectorIndexed()) return false;
1761     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1762   }
1763 
1764   bool isVecListThreeDHWordIndexed() const {
1765     if (!isSingleSpacedVectorIndexed()) return false;
1766     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1767   }
1768 
1769   bool isVecListThreeQWordIndexed() const {
1770     if (!isDoubleSpacedVectorIndexed()) return false;
1771     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1772   }
1773 
1774   bool isVecListThreeQHWordIndexed() const {
1775     if (!isDoubleSpacedVectorIndexed()) return false;
1776     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1777   }
1778 
1779   bool isVecListThreeDWordIndexed() const {
1780     if (!isSingleSpacedVectorIndexed()) return false;
1781     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1782   }
1783 
1784   bool isVecListFourDByteIndexed() const {
1785     if (!isSingleSpacedVectorIndexed()) return false;
1786     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1787   }
1788 
1789   bool isVecListFourDHWordIndexed() const {
1790     if (!isSingleSpacedVectorIndexed()) return false;
1791     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1792   }
1793 
1794   bool isVecListFourQWordIndexed() const {
1795     if (!isDoubleSpacedVectorIndexed()) return false;
1796     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1797   }
1798 
1799   bool isVecListFourQHWordIndexed() const {
1800     if (!isDoubleSpacedVectorIndexed()) return false;
1801     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1802   }
1803 
1804   bool isVecListFourDWordIndexed() const {
1805     if (!isSingleSpacedVectorIndexed()) return false;
1806     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1807   }
1808 
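  // A bare vector index is in range when it addresses a lane of the given
  // element size within a 64-bit D register: 8 lanes of i8, 4 of i16, 2 of
  // i32, or a single i64 lane.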
1809   bool isVectorIndex8() const {
1810     if (Kind != k_VectorIndex) return false;
1811     return VectorIndex.Val < 8;
1812   }
1813 
1814   bool isVectorIndex16() const {
1815     if (Kind != k_VectorIndex) return false;
1816     return VectorIndex.Val < 4;
1817   }
1818 
1819   bool isVectorIndex32() const {
1820     if (Kind != k_VectorIndex) return false;
1821     return VectorIndex.Val < 2;
1822   }

  bool isVectorIndex64() const {
1824     if (Kind != k_VectorIndex) return false;
1825     return VectorIndex.Val < 1;
1826   }
1827 
1828   bool isNEONi8splat() const {
1829     if (!isImm()) return false;
1830     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1831     // Must be a constant.
1832     if (!CE) return false;
1833     int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
1836     return Value >= 0 && Value < 256;
1837   }
1838 
1839   bool isNEONi16splat() const {
1840     if (isNEONByteReplicate(2))
      return false; // Leave this to the byte-replicate operand class instead.
1842     if (!isImm())
1843       return false;
1844     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1845     // Must be a constant.
1846     if (!CE) return false;
1847     unsigned Value = CE->getValue();
1848     return ARM_AM::isNEONi16splat(Value);
1849   }
1850 
1851   bool isNEONi16splatNot() const {
1852     if (!isImm())
1853       return false;
1854     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1855     // Must be a constant.
1856     if (!CE) return false;
1857     unsigned Value = CE->getValue();
1858     return ARM_AM::isNEONi16splat(~Value & 0xffff);
1859   }
1860 
1861   bool isNEONi32splat() const {
1862     if (isNEONByteReplicate(4))
      return false; // Leave this to the byte-replicate operand class instead.
1864     if (!isImm())
1865       return false;
1866     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1867     // Must be a constant.
1868     if (!CE) return false;
1869     unsigned Value = CE->getValue();
1870     return ARM_AM::isNEONi32splat(Value);
1871   }
1872 
1873   bool isNEONi32splatNot() const {
1874     if (!isImm())
1875       return false;
1876     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1877     // Must be a constant.
1878     if (!CE) return false;
1879     unsigned Value = CE->getValue();
1880     return ARM_AM::isNEONi32splat(~Value);
1881   }
1882 
1883   static bool isValidNEONi32vmovImm(int64_t Value) {
    // i32 value with set bits in only one byte: X000, 0X00, 00X0, or 000X.
    // For VMOV/VMVN only, the 00Xf and 0Xff forms are also accepted.
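    // For example, 0x00ab0000 and 0x0000abff are accepted, while 0x00ab00cd
    // has set bits in two plain bytes and is rejected.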
1886     return ((Value & 0xffffffffffffff00) == 0) ||
1887            ((Value & 0xffffffffffff00ff) == 0) ||
1888            ((Value & 0xffffffffff00ffff) == 0) ||
1889            ((Value & 0xffffffff00ffffff) == 0) ||
1890            ((Value & 0xffffffffffff00ff) == 0xff) ||
1891            ((Value & 0xffffffffff00ffff) == 0xffff);
1892   }
1893 
1894   bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
1895     assert((Width == 8 || Width == 16 || Width == 32) &&
1896            "Invalid element width");
1897     assert(NumElems * Width <= 64 && "Invalid result width");
1898 
1899     if (!isImm())
1900       return false;
1901     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1902     // Must be a constant.
1903     if (!CE)
1904       return false;
1905     int64_t Value = CE->getValue();
1906     if (!Value)
1907       return false; // Don't bother with zero.
1908     if (Inv)
1909       Value = ~Value;
1910 
1911     uint64_t Mask = (1ull << Width) - 1;
1912     uint64_t Elem = Value & Mask;
1913     if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
1914       return false;
1915     if (Width == 32 && !isValidNEONi32vmovImm(Elem))
1916       return false;
1917 
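    // Every remaining Width-bit element must repeat the low element; e.g. a
    // (Width = 8, NumElems = 4) replicate accepts 0x42424242.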
1918     for (unsigned i = 1; i < NumElems; ++i) {
1919       Value >>= Width;
1920       if ((Value & Mask) != Elem)
1921         return false;
1922     }
1923     return true;
1924   }
1925 
1926   bool isNEONByteReplicate(unsigned NumBytes) const {
1927     return isNEONReplicate(8, NumBytes, false);
1928   }
1929 
1930   static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
1931     assert((FromW == 8 || FromW == 16 || FromW == 32) &&
1932            "Invalid source width");
1933     assert((ToW == 16 || ToW == 32 || ToW == 64) &&
1934            "Invalid destination width");
    assert(FromW < ToW && "FromW must be smaller than ToW");
1936   }
1937 
1938   template<unsigned FromW, unsigned ToW>
1939   bool isNEONmovReplicate() const {
1940     checkNeonReplicateArgs(FromW, ToW);
1941     if (ToW == 64 && isNEONi64splat())
1942       return false;
1943     return isNEONReplicate(FromW, ToW / FromW, false);
1944   }
1945 
1946   template<unsigned FromW, unsigned ToW>
1947   bool isNEONinvReplicate() const {
1948     checkNeonReplicateArgs(FromW, ToW);
1949     return isNEONReplicate(FromW, ToW / FromW, true);
1950   }
1951 
1952   bool isNEONi32vmov() const {
1953     if (isNEONByteReplicate(4))
      return false; // Let it be classified as a byte-replicate case.
1955     if (!isImm())
1956       return false;
1957     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1958     // Must be a constant.
1959     if (!CE)
1960       return false;
1961     return isValidNEONi32vmovImm(CE->getValue());
1962   }
1963 
1964   bool isNEONi32vmovNeg() const {
1965     if (!isImm()) return false;
1966     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1967     // Must be a constant.
1968     if (!CE) return false;
1969     return isValidNEONi32vmovImm(~CE->getValue());
1970   }
1971 
1972   bool isNEONi64splat() const {
1973     if (!isImm()) return false;
1974     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1975     // Must be a constant.
1976     if (!CE) return false;
1977     uint64_t Value = CE->getValue();
1978     // i64 value with each byte being either 0 or 0xff.
1979     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1980       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1981     return true;
1982   }
1983 
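  // Rotation immediate for the complex number instructions. For example,
  // isComplexRotation<90, 0> accepts 0, 90, 180 and 270, while
  // isComplexRotation<180, 90> accepts 90 and 270.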
1984   template<int64_t Angle, int64_t Remainder>
1985   bool isComplexRotation() const {
1986     if (!isImm()) return false;
1987 
1988     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1989     if (!CE) return false;
1990     uint64_t Value = CE->getValue();
1991 
1992     return (Value % Angle == Remainder && Value <= 270);
1993   }
1994 
1995   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1996     // Add as immediates when possible.  Null MCExpr = 0.
1997     if (!Expr)
1998       Inst.addOperand(MCOperand::createImm(0));
1999     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
2000       Inst.addOperand(MCOperand::createImm(CE->getValue()));
2001     else
2002       Inst.addOperand(MCOperand::createExpr(Expr));
2003   }
2004 
2005   void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
2006     assert(N == 1 && "Invalid number of operands!");
2007     addExpr(Inst, getImm());
2008   }
2009 
2010   void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
2011     assert(N == 1 && "Invalid number of operands!");
2012     addExpr(Inst, getImm());
2013   }
2014 
2015   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
2016     assert(N == 2 && "Invalid number of operands!");
2017     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0 : ARM::CPSR;
2019     Inst.addOperand(MCOperand::createReg(RegNum));
2020   }
2021 
2022   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
2023     assert(N == 1 && "Invalid number of operands!");
2024     Inst.addOperand(MCOperand::createImm(getCoproc()));
2025   }
2026 
2027   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
2028     assert(N == 1 && "Invalid number of operands!");
2029     Inst.addOperand(MCOperand::createImm(getCoproc()));
2030   }
2031 
2032   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
2033     assert(N == 1 && "Invalid number of operands!");
2034     Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
2035   }
2036 
2037   void addITMaskOperands(MCInst &Inst, unsigned N) const {
2038     assert(N == 1 && "Invalid number of operands!");
2039     Inst.addOperand(MCOperand::createImm(ITMask.Mask));
2040   }
2041 
2042   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
2043     assert(N == 1 && "Invalid number of operands!");
2044     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
2045   }
2046 
2047   void addCCOutOperands(MCInst &Inst, unsigned N) const {
2048     assert(N == 1 && "Invalid number of operands!");
2049     Inst.addOperand(MCOperand::createReg(getReg()));
2050   }
2051 
2052   void addRegOperands(MCInst &Inst, unsigned N) const {
2053     assert(N == 1 && "Invalid number of operands!");
2054     Inst.addOperand(MCOperand::createReg(getReg()));
2055   }
2056 
2057   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
2058     assert(N == 3 && "Invalid number of operands!");
2059     assert(isRegShiftedReg() &&
2060            "addRegShiftedRegOperands() on non-RegShiftedReg!");
2061     Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
2062     Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
2063     Inst.addOperand(MCOperand::createImm(
2064       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
2065   }
2066 
2067   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
2068     assert(N == 2 && "Invalid number of operands!");
2069     assert(isRegShiftedImm() &&
2070            "addRegShiftedImmOperands() on non-RegShiftedImm!");
2071     Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
2072     // Shift of #32 is encoded as 0 where permitted
2073     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
2074     Inst.addOperand(MCOperand::createImm(
2075       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
2076   }
2077 
2078   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
2079     assert(N == 1 && "Invalid number of operands!");
2080     Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
2081                                          ShifterImm.Imm));
2082   }
2083 
2084   void addRegListOperands(MCInst &Inst, unsigned N) const {
2085     assert(N == 1 && "Invalid number of operands!");
    for (unsigned Reg : getRegList())
      Inst.addOperand(MCOperand::createReg(Reg));
2090   }
2091 
2092   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
2093     addRegListOperands(Inst, N);
2094   }
2095 
2096   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
2097     addRegListOperands(Inst, N);
2098   }
2099 
2100   void addRotImmOperands(MCInst &Inst, unsigned N) const {
2101     assert(N == 1 && "Invalid number of operands!");
2102     // Encoded as val>>3. The printer handles display as 8, 16, 24.
2103     Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
2104   }
2105 
2106   void addModImmOperands(MCInst &Inst, unsigned N) const {
2107     assert(N == 1 && "Invalid number of operands!");
2108 
2109     // Support for fixups (MCFixup)
2110     if (isImm())
2111       return addImmOperands(Inst, N);
2112 
2113     Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
2114   }
2115 
2116   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2117     assert(N == 1 && "Invalid number of operands!");
2118     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2119     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2120     Inst.addOperand(MCOperand::createImm(Enc));
2121   }
2122 
2123   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2124     assert(N == 1 && "Invalid number of operands!");
2125     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2126     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2127     Inst.addOperand(MCOperand::createImm(Enc));
2128   }
2129 
2130   void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
2131     assert(N == 1 && "Invalid number of operands!");
2132     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2133     uint32_t Val = -CE->getValue();
2134     Inst.addOperand(MCOperand::createImm(Val));
2135   }
2136 
2137   void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
2138     assert(N == 1 && "Invalid number of operands!");
2139     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2140     uint32_t Val = -CE->getValue();
2141     Inst.addOperand(MCOperand::createImm(Val));
2142   }
2143 
2144   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
2145     assert(N == 1 && "Invalid number of operands!");
2146     // Munge the lsb/width into a bitfield mask.
2147     unsigned lsb = Bitfield.LSB;
2148     unsigned width = Bitfield.Width;
2149     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
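    // For example, lsb = 8 and width = 8 produce Mask == 0xffff00ff.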
2150     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
2151                       (32 - (lsb + width)));
2152     Inst.addOperand(MCOperand::createImm(Mask));
2153   }
2154 
2155   void addImmOperands(MCInst &Inst, unsigned N) const {
2156     assert(N == 1 && "Invalid number of operands!");
2157     addExpr(Inst, getImm());
2158   }
2159 
2160   void addFBits16Operands(MCInst &Inst, unsigned N) const {
2161     assert(N == 1 && "Invalid number of operands!");
2162     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2163     Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
2164   }
2165 
2166   void addFBits32Operands(MCInst &Inst, unsigned N) const {
2167     assert(N == 1 && "Invalid number of operands!");
2168     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2169     Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
2170   }
2171 
2172   void addFPImmOperands(MCInst &Inst, unsigned N) const {
2173     assert(N == 1 && "Invalid number of operands!");
2174     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2175     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
2176     Inst.addOperand(MCOperand::createImm(Val));
2177   }
2178 
2179   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
2180     assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instructions don't encode operands that way yet.
2183     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2184     Inst.addOperand(MCOperand::createImm(CE->getValue()));
2185   }
2186 
2187   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
2188     assert(N == 1 && "Invalid number of operands!");
2189     // The immediate is scaled by four in the encoding and is stored
2190     // in the MCInst as such. Lop off the low two bits here.
2191     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2192     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2193   }
2194 
2195   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
2196     assert(N == 1 && "Invalid number of operands!");
2197     // The immediate is scaled by four in the encoding and is stored
2198     // in the MCInst as such. Lop off the low two bits here.
2199     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2200     Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
2201   }
2202 
2203   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
2204     assert(N == 1 && "Invalid number of operands!");
2205     // The immediate is scaled by four in the encoding and is stored
2206     // in the MCInst as such. Lop off the low two bits here.
2207     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2208     Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
2209   }
2210 
2211   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
2212     assert(N == 1 && "Invalid number of operands!");
2213     // The constant encodes as the immediate-1, and we store in the instruction
2214     // the bits as encoded, so subtract off one here.
2215     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2216     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2217   }
2218 
2219   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
2220     assert(N == 1 && "Invalid number of operands!");
2221     // The constant encodes as the immediate-1, and we store in the instruction
2222     // the bits as encoded, so subtract off one here.
2223     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2224     Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
2225   }
2226 
2227   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
2228     assert(N == 1 && "Invalid number of operands!");
2229     // The constant encodes as the immediate, except for 32, which encodes as
2230     // zero.
2231     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2232     unsigned Imm = CE->getValue();
2233     Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
2234   }
2235 
2236   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
2237     assert(N == 1 && "Invalid number of operands!");
2238     // An ASR value of 32 encodes as 0, so that's how we want to add it to
2239     // the instruction as well.
2240     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2241     int Val = CE->getValue();
2242     Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
2243   }
2244 
2245   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
2246     assert(N == 1 && "Invalid number of operands!");
2247     // The operand is actually a t2_so_imm, but we have its bitwise
2248     // negation in the assembly source, so twiddle it here.
2249     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2250     Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
2251   }
2252 
2253   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
2254     assert(N == 1 && "Invalid number of operands!");
2255     // The operand is actually a t2_so_imm, but we have its
2256     // negation in the assembly source, so twiddle it here.
2257     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2258     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2259   }
2260 
2261   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
2262     assert(N == 1 && "Invalid number of operands!");
2263     // The operand is actually an imm0_4095, but we have its
2264     // negation in the assembly source, so twiddle it here.
2265     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2266     Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
2267   }
2268 
2269   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2271       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2272       return;
2273     }
2274 
2275     const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2276     assert(SR && "Unknown value type!");
2277     Inst.addOperand(MCOperand::createExpr(SR));
2278   }
2279 
2280   void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
2281     assert(N == 1 && "Invalid number of operands!");
2282     if (isImm()) {
2283       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2284       if (CE) {
2285         Inst.addOperand(MCOperand::createImm(CE->getValue()));
2286         return;
2287       }
2288 
2289       const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2290 
2291       assert(SR && "Unknown value type!");
2292       Inst.addOperand(MCOperand::createExpr(SR));
2293       return;
2294     }
2295 
2296     assert(isMem()  && "Unknown value type!");
2297     assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
2298     Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
2299   }
2300 
2301   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
2302     assert(N == 1 && "Invalid number of operands!");
2303     Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
2304   }
2305 
2306   void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2307     assert(N == 1 && "Invalid number of operands!");
2308     Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
2309   }
2310 
2311   void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
2312     assert(N == 1 && "Invalid number of operands!");
2313     Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
2314   }
2315 
2316   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
2317     assert(N == 1 && "Invalid number of operands!");
2318     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2319   }
2320 
2321   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
2322     assert(N == 1 && "Invalid number of operands!");
2323     int32_t Imm = Memory.OffsetImm->getValue();
2324     Inst.addOperand(MCOperand::createImm(Imm));
2325   }
2326 
2327   void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2328     assert(N == 1 && "Invalid number of operands!");
2329     assert(isImm() && "Not an immediate!");
2330 
2331     // If we have an immediate that's not a constant, treat it as a label
2332     // reference needing a fixup.
2333     if (!isa<MCConstantExpr>(getImm())) {
2334       Inst.addOperand(MCOperand::createExpr(getImm()));
2335       return;
2336     }
2337 
2338     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2339     int Val = CE->getValue();
2340     Inst.addOperand(MCOperand::createImm(Val));
2341   }
2342 
2343   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
2344     assert(N == 2 && "Invalid number of operands!");
2345     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2346     Inst.addOperand(MCOperand::createImm(Memory.Alignment));
2347   }
2348 
2349   void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2350     addAlignedMemoryOperands(Inst, N);
2351   }
2352 
2353   void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
2354     addAlignedMemoryOperands(Inst, N);
2355   }
2356 
2357   void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2358     addAlignedMemoryOperands(Inst, N);
2359   }
2360 
2361   void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
2362     addAlignedMemoryOperands(Inst, N);
2363   }
2364 
2365   void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2366     addAlignedMemoryOperands(Inst, N);
2367   }
2368 
2369   void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
2370     addAlignedMemoryOperands(Inst, N);
2371   }
2372 
2373   void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2374     addAlignedMemoryOperands(Inst, N);
2375   }
2376 
2377   void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
2378     addAlignedMemoryOperands(Inst, N);
2379   }
2380 
2381   void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2382     addAlignedMemoryOperands(Inst, N);
2383   }
2384 
2385   void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
2386     addAlignedMemoryOperands(Inst, N);
2387   }
2388 
2389   void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
2390     addAlignedMemoryOperands(Inst, N);
2391   }
2392 
2393   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
2394     assert(N == 3 && "Invalid number of operands!");
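    // AM2 operands are (base reg, offset reg, packed value); getAM2Opc folds
    // the add/sub flag, the immediate (or shift amount), and the shift type
    // into the packed immediate operand.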
2395     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2396     if (!Memory.OffsetRegNum) {
2397       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2398       // Special case for #-0
2399       if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2400       if (Val < 0) Val = -Val;
2401       Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2402     } else {
2403       // For register offset, we encode the shift type and negation flag
2404       // here.
2405       Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2406                               Memory.ShiftImm, Memory.ShiftType);
2407     }
2408     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2409     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2410     Inst.addOperand(MCOperand::createImm(Val));
2411   }
2412 
2413   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
2414     assert(N == 2 && "Invalid number of operands!");
2415     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2416     assert(CE && "non-constant AM2OffsetImm operand!");
2417     int32_t Val = CE->getValue();
2418     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2419     // Special case for #-0
2420     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2421     if (Val < 0) Val = -Val;
2422     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
2423     Inst.addOperand(MCOperand::createReg(0));
2424     Inst.addOperand(MCOperand::createImm(Val));
2425   }
2426 
2427   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
2428     assert(N == 3 && "Invalid number of operands!");
2429     // If we have an immediate that's not a constant, treat it as a label
2430     // reference needing a fixup. If it is a constant, it's something else
2431     // and we reject it.
2432     if (isImm()) {
2433       Inst.addOperand(MCOperand::createExpr(getImm()));
2434       Inst.addOperand(MCOperand::createReg(0));
2435       Inst.addOperand(MCOperand::createImm(0));
2436       return;
2437     }
2438 
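    // AM3 operands are (base reg, offset reg, packed value); getAM3Opc folds
    // the add/sub flag and the 8-bit immediate into the packed value.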
2439     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2440     if (!Memory.OffsetRegNum) {
2441       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2442       // Special case for #-0
2443       if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2444       if (Val < 0) Val = -Val;
2445       Val = ARM_AM::getAM3Opc(AddSub, Val);
2446     } else {
2447       // For register offset, we encode the shift type and negation flag
2448       // here.
2449       Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
2450     }
2451     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2452     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2453     Inst.addOperand(MCOperand::createImm(Val));
2454   }
2455 
2456   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
2457     assert(N == 2 && "Invalid number of operands!");
2458     if (Kind == k_PostIndexRegister) {
2459       int32_t Val =
2460         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
2461       Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2462       Inst.addOperand(MCOperand::createImm(Val));
2463       return;
2464     }
2465 
2466     // Constant offset.
2467     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
2468     int32_t Val = CE->getValue();
2469     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2470     // Special case for #-0
2471     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2472     if (Val < 0) Val = -Val;
2473     Val = ARM_AM::getAM3Opc(AddSub, Val);
2474     Inst.addOperand(MCOperand::createReg(0));
2475     Inst.addOperand(MCOperand::createImm(Val));
2476   }
2477 
2478   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
2479     assert(N == 2 && "Invalid number of operands!");
2480     // If we have an immediate that's not a constant, treat it as a label
2481     // reference needing a fixup. If it is a constant, it's something else
2482     // and we reject it.
2483     if (isImm()) {
2484       Inst.addOperand(MCOperand::createExpr(getImm()));
2485       Inst.addOperand(MCOperand::createImm(0));
2486       return;
2487     }
2488 
2489     // The lower two bits are always zero and as such are not encoded.
2490     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2491     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2492     // Special case for #-0
2493     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2494     if (Val < 0) Val = -Val;
2495     Val = ARM_AM::getAM5Opc(AddSub, Val);
2496     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2497     Inst.addOperand(MCOperand::createImm(Val));
2498   }
2499 
2500   void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
2501     assert(N == 2 && "Invalid number of operands!");
2502     // If we have an immediate that's not a constant, treat it as a label
2503     // reference needing a fixup. If it is a constant, it's something else
2504     // and we reject it.
2505     if (isImm()) {
2506       Inst.addOperand(MCOperand::createExpr(getImm()));
2507       Inst.addOperand(MCOperand::createImm(0));
2508       return;
2509     }
2510 
2511     // The lower bit is always zero and as such is not encoded.
2512     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
2513     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
2514     // Special case for #-0
2515     if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
2516     if (Val < 0) Val = -Val;
2517     Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
2518     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2519     Inst.addOperand(MCOperand::createImm(Val));
2520   }
2521 
2522   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
2523     assert(N == 2 && "Invalid number of operands!");
2524     // If we have an immediate that's not a constant, treat it as a label
2525     // reference needing a fixup. If it is a constant, it's something else
2526     // and we reject it.
2527     if (isImm()) {
2528       Inst.addOperand(MCOperand::createExpr(getImm()));
2529       Inst.addOperand(MCOperand::createImm(0));
2530       return;
2531     }
2532 
2533     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2534     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2535     Inst.addOperand(MCOperand::createImm(Val));
2536   }
2537 
2538   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
2539     assert(N == 2 && "Invalid number of operands!");
2540     // The lower two bits are always zero and as such are not encoded.
2541     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
2542     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2543     Inst.addOperand(MCOperand::createImm(Val));
2544   }
2545 
2546   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2547     assert(N == 2 && "Invalid number of operands!");
2548     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2549     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2550     Inst.addOperand(MCOperand::createImm(Val));
2551   }
2552 
2553   void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2554     addMemImm8OffsetOperands(Inst, N);
2555   }
2556 
2557   void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2558     addMemImm8OffsetOperands(Inst, N);
2559   }
2560 
2561   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2562     assert(N == 2 && "Invalid number of operands!");
2563     // If this is an immediate, it's a label reference.
2564     if (isImm()) {
2565       addExpr(Inst, getImm());
2566       Inst.addOperand(MCOperand::createImm(0));
2567       return;
2568     }
2569 
2570     // Otherwise, it's a normal memory reg+offset.
2571     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2572     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2573     Inst.addOperand(MCOperand::createImm(Val));
2574   }
2575 
2576   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2577     assert(N == 2 && "Invalid number of operands!");
2578     // If this is an immediate, it's a label reference.
2579     if (isImm()) {
2580       addExpr(Inst, getImm());
2581       Inst.addOperand(MCOperand::createImm(0));
2582       return;
2583     }
2584 
2585     // Otherwise, it's a normal memory reg+offset.
2586     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2587     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2588     Inst.addOperand(MCOperand::createImm(Val));
2589   }
2590 
2591   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2592     assert(N == 1 && "Invalid number of operands!");
    // This is the container for the immediate that the constant pool entry
    // will be created from.
    addExpr(Inst, getConstantPoolImm());
2597   }
2598 
2599   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2600     assert(N == 2 && "Invalid number of operands!");
2601     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2602     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2603   }
2604 
2605   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2606     assert(N == 2 && "Invalid number of operands!");
2607     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2608     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2609   }
2610 
2611   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2612     assert(N == 3 && "Invalid number of operands!");
2613     unsigned Val =
2614       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2615                         Memory.ShiftImm, Memory.ShiftType);
2616     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2617     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2618     Inst.addOperand(MCOperand::createImm(Val));
2619   }
2620 
2621   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2622     assert(N == 3 && "Invalid number of operands!");
2623     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2624     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2625     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2626   }
2627 
2628   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2629     assert(N == 2 && "Invalid number of operands!");
2630     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2631     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2632   }
2633 
2634   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2635     assert(N == 2 && "Invalid number of operands!");
2636     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2637     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2638     Inst.addOperand(MCOperand::createImm(Val));
2639   }
2640 
2641   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2642     assert(N == 2 && "Invalid number of operands!");
2643     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2644     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2645     Inst.addOperand(MCOperand::createImm(Val));
2646   }
2647 
2648   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2649     assert(N == 2 && "Invalid number of operands!");
2650     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2651     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2652     Inst.addOperand(MCOperand::createImm(Val));
2653   }
2654 
2655   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2656     assert(N == 2 && "Invalid number of operands!");
2657     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2658     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2659     Inst.addOperand(MCOperand::createImm(Val));
2660   }
2661 
2662   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2663     assert(N == 1 && "Invalid number of operands!");
2664     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2665     assert(CE && "non-constant post-idx-imm8 operand!");
2666     int Imm = CE->getValue();
2667     bool isAdd = Imm >= 0;
2668     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
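    // Encode the magnitude in the low 8 bits and the add/sub direction in
    // bit 8 (1 means add).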
2669     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2670     Inst.addOperand(MCOperand::createImm(Imm));
2671   }
2672 
2673   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2674     assert(N == 1 && "Invalid number of operands!");
2675     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2676     assert(CE && "non-constant post-idx-imm8s4 operand!");
2677     int Imm = CE->getValue();
2678     bool isAdd = Imm >= 0;
2679     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2680     // Immediate is scaled by 4.
2681     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2682     Inst.addOperand(MCOperand::createImm(Imm));
2683   }
2684 
2685   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2686     assert(N == 2 && "Invalid number of operands!");
2687     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2688     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2689   }
2690 
2691   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2692     assert(N == 2 && "Invalid number of operands!");
2693     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2694     // The sign, shift type, and shift amount are encoded in a single operand
2695     // using the AM2 encoding helpers.
2696     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2697     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2698                                      PostIdxReg.ShiftTy);
2699     Inst.addOperand(MCOperand::createImm(Imm));
2700   }
2701 
2702   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2703     assert(N == 1 && "Invalid number of operands!");
2704     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2705   }
2706 
2707   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2708     assert(N == 1 && "Invalid number of operands!");
2709     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2710   }
2711 
2712   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2713     assert(N == 1 && "Invalid number of operands!");
2714     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2715   }
2716 
2717   void addVecListOperands(MCInst &Inst, unsigned N) const {
2718     assert(N == 1 && "Invalid number of operands!");
2719     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2720   }
2721 
2722   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2723     assert(N == 2 && "Invalid number of operands!");
2724     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2725     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2726   }
2727 
2728   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2729     assert(N == 1 && "Invalid number of operands!");
2730     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2731   }
2732 
2733   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2734     assert(N == 1 && "Invalid number of operands!");
2735     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2736   }
2737 
2738   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2739     assert(N == 1 && "Invalid number of operands!");
2740     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2741   }
2742 
2743   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
2744     assert(N == 1 && "Invalid number of operands!");
2745     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2746   }
2747 
2748   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2749     assert(N == 1 && "Invalid number of operands!");
2750     // The immediate encodes the type of constant as well as the value.
2751     // Mask in that this is an i8 splat.
2752     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2753     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2754   }
2755 
2756   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2757     assert(N == 1 && "Invalid number of operands!");
2758     // The immediate encodes the type of constant as well as the value.
2759     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2760     unsigned Value = CE->getValue();
2761     Value = ARM_AM::encodeNEONi16splat(Value);
2762     Inst.addOperand(MCOperand::createImm(Value));
2763   }
2764 
2765   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2766     assert(N == 1 && "Invalid number of operands!");
2767     // The immediate encodes the type of constant as well as the value.
2768     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2769     unsigned Value = CE->getValue();
2770     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2771     Inst.addOperand(MCOperand::createImm(Value));
2772   }
2773 
2774   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2775     assert(N == 1 && "Invalid number of operands!");
2776     // The immediate encodes the type of constant as well as the value.
2777     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2778     unsigned Value = CE->getValue();
2779     Value = ARM_AM::encodeNEONi32splat(Value);
2780     Inst.addOperand(MCOperand::createImm(Value));
2781   }
2782 
2783   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2784     assert(N == 1 && "Invalid number of operands!");
2785     // The immediate encodes the type of constant as well as the value.
2786     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2787     unsigned Value = CE->getValue();
2788     Value = ARM_AM::encodeNEONi32splat(~Value);
2789     Inst.addOperand(MCOperand::createImm(Value));
2790   }
2791 
2792   void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
2793     // The immediate encodes the type of constant as well as the value.
2794     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2795     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2796             Inst.getOpcode() == ARM::VMOVv16i8) &&
          "Every instruction that replicates a non-zero byte must have "
          "been rewritten to VMOVv8i8 or VMOVv16i8.");
2799     unsigned Value = CE->getValue();
2800     if (Inv)
2801       Value = ~Value;
2802     unsigned B = Value & 0xff;
2803     B |= 0xe00; // cmode = 0b1110
2804     Inst.addOperand(MCOperand::createImm(B));
2805   }
2806 
2807   void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
2808     assert(N == 1 && "Invalid number of operands!");
2809     addNEONi8ReplicateOperands(Inst, true);
2810   }
2811 
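  // Fold a value accepted by isValidNEONi32vmovImm into the byte-plus-control
  // form used by the encoder: the significant byte ends up in the low 8 bits,
  // and the upper bits record its byte position and whether the bytes below
  // it are all-ones (the 00Xf/0Xff forms).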
2812   static unsigned encodeNeonVMOVImmediate(unsigned Value) {
2813     if (Value >= 256 && Value <= 0xffff)
2814       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2815     else if (Value > 0xffff && Value <= 0xffffff)
2816       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2817     else if (Value > 0xffffff)
2818       Value = (Value >> 24) | 0x600;
2819     return Value;
2820   }
2821 
2822   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2823     assert(N == 1 && "Invalid number of operands!");
2824     // The immediate encodes the type of constant as well as the value.
2825     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2826     unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
2827     Inst.addOperand(MCOperand::createImm(Value));
2828   }
2829 
2830   void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
2831     assert(N == 1 && "Invalid number of operands!");
2832     addNEONi8ReplicateOperands(Inst, false);
2833   }
2834 
2835   void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
2836     assert(N == 1 && "Invalid number of operands!");
2837     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2838     assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
2839             Inst.getOpcode() == ARM::VMOVv8i16 ||
2840             Inst.getOpcode() == ARM::VMVNv4i16 ||
2841             Inst.getOpcode() == ARM::VMVNv8i16) &&
          "Every instruction that replicates a non-zero half-word must have "
          "been rewritten to V{MOV,MVN}v{4,8}i16.");
2844     uint64_t Value = CE->getValue();
2845     unsigned Elem = Value & 0xffff;
2846     if (Elem >= 256)
2847       Elem = (Elem >> 8) | 0x200;
2848     Inst.addOperand(MCOperand::createImm(Elem));
2849   }
2850 
2851   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2852     assert(N == 1 && "Invalid number of operands!");
2853     // The immediate encodes the type of constant as well as the value.
2854     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2855     unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
2856     Inst.addOperand(MCOperand::createImm(Value));
2857   }
2858 
2859   void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
2860     assert(N == 1 && "Invalid number of operands!");
2861     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2862     assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
2863             Inst.getOpcode() == ARM::VMOVv4i32 ||
2864             Inst.getOpcode() == ARM::VMVNv2i32 ||
2865             Inst.getOpcode() == ARM::VMVNv4i32) &&
          "Every instruction that replicates a non-zero word must have "
          "been rewritten to V{MOV,MVN}v{2,4}i32.");
2868     uint64_t Value = CE->getValue();
2869     unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
2870     Inst.addOperand(MCOperand::createImm(Elem));
2871   }
2872 
2873   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2874     assert(N == 1 && "Invalid number of operands!");
2875     // The immediate encodes the type of constant as well as the value.
2876     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2877     uint64_t Value = CE->getValue();
2878     unsigned Imm = 0;
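    // Each byte of the value is assumed to be either 0x00 or 0xff (the
    // operand predicate checks this), so fold every byte down to a single
    // bit of the imm8 byte mask.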
2879     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2880       Imm |= (Value & 1) << i;
2881     }
2882     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2883   }
2884 
2885   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
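    // The rotation is one of 0, 90, 180 or 270 degrees, encoded as the
    // multiple of 90.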
2886     assert(N == 1 && "Invalid number of operands!");
2887     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2888     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
2889   }
2890 
2891   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
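    // The rotation is either 90 or 270 degrees, encoded as 0 or 1.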
2892     assert(N == 1 && "Invalid number of operands!");
2893     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2894     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
2895   }
2896 
2897   void print(raw_ostream &OS) const override;
2898 
2899   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2900     auto Op = make_unique<ARMOperand>(k_ITCondMask);
2901     Op->ITMask.Mask = Mask;
2902     Op->StartLoc = S;
2903     Op->EndLoc = S;
2904     return Op;
2905   }
2906 
2907   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2908                                                     SMLoc S) {
2909     auto Op = make_unique<ARMOperand>(k_CondCode);
2910     Op->CC.Val = CC;
2911     Op->StartLoc = S;
2912     Op->EndLoc = S;
2913     return Op;
2914   }
2915 
2916   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2917     auto Op = make_unique<ARMOperand>(k_CoprocNum);
2918     Op->Cop.Val = CopVal;
2919     Op->StartLoc = S;
2920     Op->EndLoc = S;
2921     return Op;
2922   }
2923 
2924   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2925     auto Op = make_unique<ARMOperand>(k_CoprocReg);
2926     Op->Cop.Val = CopVal;
2927     Op->StartLoc = S;
2928     Op->EndLoc = S;
2929     return Op;
2930   }
2931 
2932   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2933                                                         SMLoc E) {
2934     auto Op = make_unique<ARMOperand>(k_CoprocOption);
2935     Op->Cop.Val = Val;
2936     Op->StartLoc = S;
2937     Op->EndLoc = E;
2938     return Op;
2939   }
2940 
2941   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2942     auto Op = make_unique<ARMOperand>(k_CCOut);
2943     Op->Reg.RegNum = RegNum;
2944     Op->StartLoc = S;
2945     Op->EndLoc = S;
2946     return Op;
2947   }
2948 
2949   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2950     auto Op = make_unique<ARMOperand>(k_Token);
2951     Op->Tok.Data = Str.data();
2952     Op->Tok.Length = Str.size();
2953     Op->StartLoc = S;
2954     Op->EndLoc = S;
2955     return Op;
2956   }
2957 
2958   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2959                                                SMLoc E) {
2960     auto Op = make_unique<ARMOperand>(k_Register);
2961     Op->Reg.RegNum = RegNum;
2962     Op->StartLoc = S;
2963     Op->EndLoc = E;
2964     return Op;
2965   }
2966 
2967   static std::unique_ptr<ARMOperand>
2968   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2969                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2970                         SMLoc E) {
2971     auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2972     Op->RegShiftedReg.ShiftTy = ShTy;
2973     Op->RegShiftedReg.SrcReg = SrcReg;
2974     Op->RegShiftedReg.ShiftReg = ShiftReg;
2975     Op->RegShiftedReg.ShiftImm = ShiftImm;
2976     Op->StartLoc = S;
2977     Op->EndLoc = E;
2978     return Op;
2979   }
2980 
2981   static std::unique_ptr<ARMOperand>
2982   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2983                          unsigned ShiftImm, SMLoc S, SMLoc E) {
2984     auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2985     Op->RegShiftedImm.ShiftTy = ShTy;
2986     Op->RegShiftedImm.SrcReg = SrcReg;
2987     Op->RegShiftedImm.ShiftImm = ShiftImm;
2988     Op->StartLoc = S;
2989     Op->EndLoc = E;
2990     return Op;
2991   }
2992 
2993   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2994                                                       SMLoc S, SMLoc E) {
2995     auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2996     Op->ShifterImm.isASR = isASR;
2997     Op->ShifterImm.Imm = Imm;
2998     Op->StartLoc = S;
2999     Op->EndLoc = E;
3000     return Op;
3001   }
3002 
3003   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
3004                                                   SMLoc E) {
3005     auto Op = make_unique<ARMOperand>(k_RotateImmediate);
3006     Op->RotImm.Imm = Imm;
3007     Op->StartLoc = S;
3008     Op->EndLoc = E;
3009     return Op;
3010   }
3011 
3012   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
3013                                                   SMLoc S, SMLoc E) {
3014     auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
3015     Op->ModImm.Bits = Bits;
3016     Op->ModImm.Rot = Rot;
3017     Op->StartLoc = S;
3018     Op->EndLoc = E;
3019     return Op;
3020   }
3021 
3022   static std::unique_ptr<ARMOperand>
3023   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
3024     auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
3025     Op->Imm.Val = Val;
3026     Op->StartLoc = S;
3027     Op->EndLoc = E;
3028     return Op;
3029   }
3030 
3031   static std::unique_ptr<ARMOperand>
3032   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
3033     auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
3034     Op->Bitfield.LSB = LSB;
3035     Op->Bitfield.Width = Width;
3036     Op->StartLoc = S;
3037     Op->EndLoc = E;
3038     return Op;
3039   }
3040 
3041   static std::unique_ptr<ARMOperand>
3042   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
3043                 SMLoc StartLoc, SMLoc EndLoc) {
3044     assert(Regs.size() > 0 && "RegList contains no registers?");
3045     KindTy Kind = k_RegisterList;
3046 
3047     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
3048       Kind = k_DPRRegisterList;
3049     else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
3050              contains(Regs.front().second))
3051       Kind = k_SPRRegisterList;
3052 
3053     // Sort based on the register encoding values.
3054     array_pod_sort(Regs.begin(), Regs.end());
3055 
3056     auto Op = make_unique<ARMOperand>(Kind);
3057     for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
3058            I = Regs.begin(), E = Regs.end(); I != E; ++I)
3059       Op->Registers.push_back(I->second);
3060     Op->StartLoc = StartLoc;
3061     Op->EndLoc = EndLoc;
3062     return Op;
3063   }
3064 
3065   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
3066                                                       unsigned Count,
3067                                                       bool isDoubleSpaced,
3068                                                       SMLoc S, SMLoc E) {
3069     auto Op = make_unique<ARMOperand>(k_VectorList);
3070     Op->VectorList.RegNum = RegNum;
3071     Op->VectorList.Count = Count;
3072     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3073     Op->StartLoc = S;
3074     Op->EndLoc = E;
3075     return Op;
3076   }
3077 
3078   static std::unique_ptr<ARMOperand>
3079   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
3080                            SMLoc S, SMLoc E) {
3081     auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
3082     Op->VectorList.RegNum = RegNum;
3083     Op->VectorList.Count = Count;
3084     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3085     Op->StartLoc = S;
3086     Op->EndLoc = E;
3087     return Op;
3088   }
3089 
3090   static std::unique_ptr<ARMOperand>
3091   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
3092                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
3093     auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
3094     Op->VectorList.RegNum = RegNum;
3095     Op->VectorList.Count = Count;
3096     Op->VectorList.LaneIndex = Index;
3097     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
3098     Op->StartLoc = S;
3099     Op->EndLoc = E;
3100     return Op;
3101   }
3102 
3103   static std::unique_ptr<ARMOperand>
3104   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3105     auto Op = make_unique<ARMOperand>(k_VectorIndex);
3106     Op->VectorIndex.Val = Idx;
3107     Op->StartLoc = S;
3108     Op->EndLoc = E;
3109     return Op;
3110   }
3111 
3112   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3113                                                SMLoc E) {
3114     auto Op = make_unique<ARMOperand>(k_Immediate);
3115     Op->Imm.Val = Val;
3116     Op->StartLoc = S;
3117     Op->EndLoc = E;
3118     return Op;
3119   }
3120 
3121   static std::unique_ptr<ARMOperand>
3122   CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3123             unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3124             unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3125             SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3126     auto Op = make_unique<ARMOperand>(k_Memory);
3127     Op->Memory.BaseRegNum = BaseRegNum;
3128     Op->Memory.OffsetImm = OffsetImm;
3129     Op->Memory.OffsetRegNum = OffsetRegNum;
3130     Op->Memory.ShiftType = ShiftType;
3131     Op->Memory.ShiftImm = ShiftImm;
3132     Op->Memory.Alignment = Alignment;
3133     Op->Memory.isNegative = isNegative;
3134     Op->StartLoc = S;
3135     Op->EndLoc = E;
3136     Op->AlignmentLoc = AlignmentLoc;
3137     return Op;
3138   }
3139 
3140   static std::unique_ptr<ARMOperand>
3141   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3142                    unsigned ShiftImm, SMLoc S, SMLoc E) {
3143     auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
3144     Op->PostIdxReg.RegNum = RegNum;
3145     Op->PostIdxReg.isAdd = isAdd;
3146     Op->PostIdxReg.ShiftTy = ShiftTy;
3147     Op->PostIdxReg.ShiftImm = ShiftImm;
3148     Op->StartLoc = S;
3149     Op->EndLoc = E;
3150     return Op;
3151   }
3152 
3153   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3154                                                          SMLoc S) {
3155     auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3156     Op->MBOpt.Val = Opt;
3157     Op->StartLoc = S;
3158     Op->EndLoc = S;
3159     return Op;
3160   }
3161 
3162   static std::unique_ptr<ARMOperand>
3163   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3164     auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3165     Op->ISBOpt.Val = Opt;
3166     Op->StartLoc = S;
3167     Op->EndLoc = S;
3168     return Op;
3169   }
3170 
3171   static std::unique_ptr<ARMOperand>
3172   CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
3173     auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
3174     Op->TSBOpt.Val = Opt;
3175     Op->StartLoc = S;
3176     Op->EndLoc = S;
3177     return Op;
3178   }
3179 
3180   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3181                                                       SMLoc S) {
3182     auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3183     Op->IFlags.Val = IFlags;
3184     Op->StartLoc = S;
3185     Op->EndLoc = S;
3186     return Op;
3187   }
3188 
3189   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3190     auto Op = make_unique<ARMOperand>(k_MSRMask);
3191     Op->MMask.Val = MMask;
3192     Op->StartLoc = S;
3193     Op->EndLoc = S;
3194     return Op;
3195   }
3196 
3197   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3198     auto Op = make_unique<ARMOperand>(k_BankedReg);
3199     Op->BankedReg.Val = Reg;
3200     Op->StartLoc = S;
3201     Op->EndLoc = S;
3202     return Op;
3203   }
3204 };
3205 
3206 } // end anonymous namespace.
3207 
3208 void ARMOperand::print(raw_ostream &OS) const {
3209   auto RegName = [](unsigned Reg) {
3210     if (Reg)
3211       return ARMInstPrinter::getRegisterName(Reg);
3212     else
3213       return "noreg";
3214   };
3215 
3216   switch (Kind) {
3217   case k_CondCode:
3218     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3219     break;
3220   case k_CCOut:
3221     OS << "<ccout " << RegName(getReg()) << ">";
3222     break;
3223   case k_ITCondMask: {
3224     static const char *const MaskStr[] = {
3225       "(invalid)", "(teee)", "(tee)", "(teet)",
3226       "(te)",      "(tete)", "(tet)", "(tett)",
3227       "(t)",       "(ttee)", "(tte)", "(ttet)",
3228       "(tt)",      "(ttte)", "(ttt)", "(tttt)"
3229     };
3230     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3231     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3232     break;
3233   }
3234   case k_CoprocNum:
3235     OS << "<coprocessor number: " << getCoproc() << ">";
3236     break;
3237   case k_CoprocReg:
3238     OS << "<coprocessor register: " << getCoproc() << ">";
3239     break;
3240   case k_CoprocOption:
3241     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3242     break;
3243   case k_MSRMask:
3244     OS << "<mask: " << getMSRMask() << ">";
3245     break;
3246   case k_BankedReg:
3247     OS << "<banked reg: " << getBankedReg() << ">";
3248     break;
3249   case k_Immediate:
3250     OS << *getImm();
3251     break;
3252   case k_MemBarrierOpt:
3253     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3254     break;
3255   case k_InstSyncBarrierOpt:
3256     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3257     break;
3258   case k_TraceSyncBarrierOpt:
3259     OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
3260     break;
3261   case k_Memory:
3262     OS << "<memory";
3263     if (Memory.BaseRegNum)
3264       OS << " base:" << RegName(Memory.BaseRegNum);
3265     if (Memory.OffsetImm)
3266       OS << " offset-imm:" << *Memory.OffsetImm;
3267     if (Memory.OffsetRegNum)
3268       OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
3269          << RegName(Memory.OffsetRegNum);
3270     if (Memory.ShiftType != ARM_AM::no_shift) {
3271       OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
3272       OS << " shift-imm:" << Memory.ShiftImm;
3273     }
3274     if (Memory.Alignment)
3275       OS << " alignment:" << Memory.Alignment;
3276     OS << ">";
3277     break;
3278   case k_PostIndexRegister:
3279     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3280        << RegName(PostIdxReg.RegNum);
3281     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3282       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3283          << PostIdxReg.ShiftImm;
3284     OS << ">";
3285     break;
3286   case k_ProcIFlags: {
3287     OS << "<ARM_PROC::";
3288     unsigned IFlags = getProcIFlags();
    for (int i = 2; i >= 0; --i)
3290       if (IFlags & (1 << i))
3291         OS << ARM_PROC::IFlagsToString(1 << i);
3292     OS << ">";
3293     break;
3294   }
3295   case k_Register:
3296     OS << "<register " << RegName(getReg()) << ">";
3297     break;
3298   case k_ShifterImmediate:
3299     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3300        << " #" << ShifterImm.Imm << ">";
3301     break;
3302   case k_ShiftedRegister:
3303     OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
3304        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
3305        << RegName(RegShiftedReg.ShiftReg) << ">";
3306     break;
3307   case k_ShiftedImmediate:
3308     OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
3309        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
3310        << RegShiftedImm.ShiftImm << ">";
3311     break;
3312   case k_RotateImmediate:
3313     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3314     break;
3315   case k_ModifiedImmediate:
3316     OS << "<mod_imm #" << ModImm.Bits << ", #"
3317        <<  ModImm.Rot << ")>";
3318     break;
3319   case k_ConstantPoolImmediate:
3320     OS << "<constant_pool_imm #" << *getConstantPoolImm();
3321     break;
3322   case k_BitfieldDescriptor:
3323     OS << "<bitfield " << "lsb: " << Bitfield.LSB
3324        << ", width: " << Bitfield.Width << ">";
3325     break;
3326   case k_RegisterList:
3327   case k_DPRRegisterList:
3328   case k_SPRRegisterList: {
3329     OS << "<register_list ";
3330 
3331     const SmallVectorImpl<unsigned> &RegList = getRegList();
3332     for (SmallVectorImpl<unsigned>::const_iterator
3333            I = RegList.begin(), E = RegList.end(); I != E; ) {
3334       OS << RegName(*I);
3335       if (++I < E) OS << ", ";
3336     }
3337 
3338     OS << ">";
3339     break;
3340   }
3341   case k_VectorList:
3342     OS << "<vector_list " << VectorList.Count << " * "
3343        << RegName(VectorList.RegNum) << ">";
3344     break;
3345   case k_VectorListAllLanes:
3346     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3347        << RegName(VectorList.RegNum) << ">";
3348     break;
3349   case k_VectorListIndexed:
3350     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3351        << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
3352     break;
3353   case k_Token:
3354     OS << "'" << getToken() << "'";
3355     break;
3356   case k_VectorIndex:
3357     OS << "<vectorindex " << getVectorIndex() << ">";
3358     break;
3359   }
3360 }
3361 
3362 /// @name Auto-generated Match Functions
3363 /// {
3364 
3365 static unsigned MatchRegisterName(StringRef Name);
3366 
3367 /// }
3368 
3369 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3370                                  SMLoc &StartLoc, SMLoc &EndLoc) {
3371   const AsmToken &Tok = getParser().getTok();
3372   StartLoc = Tok.getLoc();
3373   EndLoc = Tok.getEndLoc();
3374   RegNo = tryParseRegister();
3375 
3376   return (RegNo == (unsigned)-1);
3377 }
3378 
3379 /// Try to parse a register name.  The token must be an Identifier when called,
3380 /// and if it is a register name the token is eaten and the register number is
3381 /// returned.  Otherwise return -1.
3382 int ARMAsmParser::tryParseRegister() {
3383   MCAsmParser &Parser = getParser();
3384   const AsmToken &Tok = Parser.getTok();
3385   if (Tok.isNot(AsmToken::Identifier)) return -1;
3386 
3387   std::string lowerCase = Tok.getString().lower();
3388   unsigned RegNum = MatchRegisterName(lowerCase);
3389   if (!RegNum) {
3390     RegNum = StringSwitch<unsigned>(lowerCase)
3391       .Case("r13", ARM::SP)
3392       .Case("r14", ARM::LR)
3393       .Case("r15", ARM::PC)
3394       .Case("ip", ARM::R12)
3395       // Additional register name aliases for 'gas' compatibility.
3396       .Case("a1", ARM::R0)
3397       .Case("a2", ARM::R1)
3398       .Case("a3", ARM::R2)
3399       .Case("a4", ARM::R3)
3400       .Case("v1", ARM::R4)
3401       .Case("v2", ARM::R5)
3402       .Case("v3", ARM::R6)
3403       .Case("v4", ARM::R7)
3404       .Case("v5", ARM::R8)
3405       .Case("v6", ARM::R9)
3406       .Case("v7", ARM::R10)
3407       .Case("v8", ARM::R11)
3408       .Case("sb", ARM::R9)
3409       .Case("sl", ARM::R10)
3410       .Case("fp", ARM::R11)
3411       .Default(0);
3412   }
3413   if (!RegNum) {
3414     // Check for aliases registered via .req. Canonicalize to lower case.
3415     // That's more consistent since register names are case insensitive, and
3416     // it's how the original entry was passed in from MC/MCParser/AsmParser.
3417     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
3418     // If no match, return failure.
3419     if (Entry == RegisterReqs.end())
3420       return -1;
3421     Parser.Lex(); // Eat identifier token.
3422     return Entry->getValue();
3423   }
3424 
3425   // Some FPUs only have 16 D registers, so D16-D31 are invalid
3426   if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
3427     return -1;
3428 
3429   Parser.Lex(); // Eat identifier token.
3430 
3431   return RegNum;
3432 }
3433 
// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
3435 // If a recoverable error occurs, return 1. If an irrecoverable error
3436 // occurs, return -1. An irrecoverable error is one where tokens have been
3437 // consumed in the process of trying to parse the shifter (i.e., when it is
3438 // indeed a shifter operand, but malformed).
3439 int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
3440   MCAsmParser &Parser = getParser();
3441   SMLoc S = Parser.getTok().getLoc();
3442   const AsmToken &Tok = Parser.getTok();
3443   if (Tok.isNot(AsmToken::Identifier))
3444     return -1;
3445 
3446   std::string lowerCase = Tok.getString().lower();
3447   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
3448       .Case("asl", ARM_AM::lsl)
3449       .Case("lsl", ARM_AM::lsl)
3450       .Case("lsr", ARM_AM::lsr)
3451       .Case("asr", ARM_AM::asr)
3452       .Case("ror", ARM_AM::ror)
3453       .Case("rrx", ARM_AM::rrx)
3454       .Default(ARM_AM::no_shift);
3455 
3456   if (ShiftTy == ARM_AM::no_shift)
3457     return 1;
3458 
3459   Parser.Lex(); // Eat the operator.
3460 
3461   // The source register for the shift has already been added to the
3462   // operand list, so we need to pop it off and combine it into the shifted
3463   // register operand instead.
3464   std::unique_ptr<ARMOperand> PrevOp(
3465       (ARMOperand *)Operands.pop_back_val().release());
3466   if (!PrevOp->isReg())
3467     return Error(PrevOp->getStartLoc(), "shift must be of a register");
3468   int SrcReg = PrevOp->getReg();
3469 
3470   SMLoc EndLoc;
3471   int64_t Imm = 0;
3472   int ShiftReg = 0;
3473   if (ShiftTy == ARM_AM::rrx) {
    // RRX doesn't have an explicit shift amount. The encoder expects
3475     // the shift register to be the same as the source register. Seems odd,
3476     // but OK.
3477     ShiftReg = SrcReg;
3478   } else {
3479     // Figure out if this is shifted by a constant or a register (for non-RRX).
3480     if (Parser.getTok().is(AsmToken::Hash) ||
3481         Parser.getTok().is(AsmToken::Dollar)) {
3482       Parser.Lex(); // Eat hash.
3483       SMLoc ImmLoc = Parser.getTok().getLoc();
3484       const MCExpr *ShiftExpr = nullptr;
3485       if (getParser().parseExpression(ShiftExpr, EndLoc)) {
3486         Error(ImmLoc, "invalid immediate shift value");
3487         return -1;
3488       }
3489       // The expression must be evaluatable as an immediate.
3490       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
3491       if (!CE) {
3492         Error(ImmLoc, "invalid immediate shift value");
3493         return -1;
3494       }
3495       // Range check the immediate.
3496       // lsl, ror: 0 <= imm <= 31
3497       // lsr, asr: 0 <= imm <= 32
3498       Imm = CE->getValue();
3499       if (Imm < 0 ||
3500           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
3501           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
3502         Error(ImmLoc, "immediate shift value out of range");
3503         return -1;
3504       }
      // A shift by zero is a nop. Always send it through as lsl
      // ('as' compatibility).
3507       if (Imm == 0)
3508         ShiftTy = ARM_AM::lsl;
3509     } else if (Parser.getTok().is(AsmToken::Identifier)) {
3510       SMLoc L = Parser.getTok().getLoc();
3511       EndLoc = Parser.getTok().getEndLoc();
3512       ShiftReg = tryParseRegister();
3513       if (ShiftReg == -1) {
3514         Error(L, "expected immediate or register in shift operand");
3515         return -1;
3516       }
3517     } else {
3518       Error(Parser.getTok().getLoc(),
3519             "expected immediate or register in shift operand");
3520       return -1;
3521     }
3522   }
3523 
3524   if (ShiftReg && ShiftTy != ARM_AM::rrx)
3525     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
3526                                                          ShiftReg, Imm,
3527                                                          S, EndLoc));
3528   else
3529     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
3530                                                           S, EndLoc));
3531 
3532   return 0;
3533 }
3534 
3535 /// Try to parse a register name.  The token must be an Identifier when called.
3536 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3537 /// if there is a "writeback". 'true' if it's not a register.
3538 ///
/// TODO: this is likely to change to allow different register types and/or to
3540 /// parse for a specific register type.
3541 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3542   MCAsmParser &Parser = getParser();
3543   SMLoc RegStartLoc = Parser.getTok().getLoc();
3544   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3545   int RegNo = tryParseRegister();
3546   if (RegNo == -1)
3547     return true;
3548 
3549   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3550 
3551   const AsmToken &ExclaimTok = Parser.getTok();
3552   if (ExclaimTok.is(AsmToken::Exclaim)) {
3553     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3554                                                ExclaimTok.getLoc()));
3555     Parser.Lex(); // Eat exclaim token
3556     return false;
3557   }
3558 
3559   // Also check for an index operand. This is only legal for vector registers,
3560   // but that'll get caught OK in operand matching, so we don't need to
3561   // explicitly filter everything else out here.
3562   if (Parser.getTok().is(AsmToken::LBrac)) {
3563     SMLoc SIdx = Parser.getTok().getLoc();
3564     Parser.Lex(); // Eat left bracket token.
3565 
3566     const MCExpr *ImmVal;
3567     if (getParser().parseExpression(ImmVal))
3568       return true;
3569     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3570     if (!MCE)
3571       return TokError("immediate value expected for vector index");
3572 
3573     if (Parser.getTok().isNot(AsmToken::RBrac))
3574       return Error(Parser.getTok().getLoc(), "']' expected");
3575 
3576     SMLoc E = Parser.getTok().getEndLoc();
3577     Parser.Lex(); // Eat right bracket token.
3578 
3579     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3580                                                      SIdx, E,
3581                                                      getContext()));
3582   }
3583 
3584   return false;
3585 }
3586 
/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
3588 /// instruction with a symbolic operand name.
3589 /// We accept "crN" syntax for GAS compatibility.
3590 /// <operand-name> ::= <prefix><number>
3591 /// If CoprocOp is 'c', then:
3592 ///   <prefix> ::= c | cr
3593 /// If CoprocOp is 'p', then :
3594 ///   <prefix> ::= p
3595 /// <number> ::= integer in range [0, 15]
3596 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3597   // Use the same layout as the tablegen'erated register name matcher. Ugly,
3598   // but efficient.
3599   if (Name.size() < 2 || Name[0] != CoprocOp)
3600     return -1;
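  // Strip the prefix ('c', 'cr' or 'p'), leaving only the digits.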
3601   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3602 
3603   switch (Name.size()) {
3604   default: return -1;
3605   case 1:
3606     switch (Name[0]) {
3607     default:  return -1;
3608     case '0': return 0;
3609     case '1': return 1;
3610     case '2': return 2;
3611     case '3': return 3;
3612     case '4': return 4;
3613     case '5': return 5;
3614     case '6': return 6;
3615     case '7': return 7;
3616     case '8': return 8;
3617     case '9': return 9;
3618     }
3619   case 2:
3620     if (Name[0] != '1')
3621       return -1;
3622     switch (Name[1]) {
3623     default:  return -1;
3624     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3625     // However, old cores (v5/v6) did use them in that way.
3626     case '0': return 10;
3627     case '1': return 11;
3628     case '2': return 12;
3629     case '3': return 13;
3630     case '4': return 14;
3631     case '5': return 15;
3632     }
3633   }
3634 }
3635 
3636 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3637 OperandMatchResultTy
3638 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3639   MCAsmParser &Parser = getParser();
3640   SMLoc S = Parser.getTok().getLoc();
3641   const AsmToken &Tok = Parser.getTok();
3642   if (!Tok.is(AsmToken::Identifier))
3643     return MatchOperand_NoMatch;
3644   unsigned CC = ARMCondCodeFromString(Tok.getString());
3645   if (CC == ~0U)
3646     return MatchOperand_NoMatch;
3647   Parser.Lex(); // Eat the token.
3648 
3649   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3650 
3651   return MatchOperand_Success;
3652 }
3653 
/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
3655 /// token must be an Identifier when called, and if it is a coprocessor
3656 /// number, the token is eaten and the operand is added to the operand list.
3657 OperandMatchResultTy
3658 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3659   MCAsmParser &Parser = getParser();
3660   SMLoc S = Parser.getTok().getLoc();
3661   const AsmToken &Tok = Parser.getTok();
3662   if (Tok.isNot(AsmToken::Identifier))
3663     return MatchOperand_NoMatch;
3664 
3665   int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
3666   if (Num == -1)
3667     return MatchOperand_NoMatch;
3668   // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3669   if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3670     return MatchOperand_NoMatch;
3671 
3672   Parser.Lex(); // Eat identifier token.
3673   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3674   return MatchOperand_Success;
3675 }
3676 
/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// register, the token is eaten and the operand is added to the operand list.
3680 OperandMatchResultTy
3681 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3682   MCAsmParser &Parser = getParser();
3683   SMLoc S = Parser.getTok().getLoc();
3684   const AsmToken &Tok = Parser.getTok();
3685   if (Tok.isNot(AsmToken::Identifier))
3686     return MatchOperand_NoMatch;
3687 
3688   int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
3689   if (Reg == -1)
3690     return MatchOperand_NoMatch;
3691 
3692   Parser.Lex(); // Eat identifier token.
3693   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3694   return MatchOperand_Success;
3695 }
3696 
/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
3698 /// coproc_option : '{' imm0_255 '}'
3699 OperandMatchResultTy
3700 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3701   MCAsmParser &Parser = getParser();
3702   SMLoc S = Parser.getTok().getLoc();
3703 
3704   // If this isn't a '{', this isn't a coprocessor immediate operand.
3705   if (Parser.getTok().isNot(AsmToken::LCurly))
3706     return MatchOperand_NoMatch;
3707   Parser.Lex(); // Eat the '{'
3708 
3709   const MCExpr *Expr;
3710   SMLoc Loc = Parser.getTok().getLoc();
3711   if (getParser().parseExpression(Expr)) {
3712     Error(Loc, "illegal expression");
3713     return MatchOperand_ParseFail;
3714   }
3715   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3716   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3717     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3718     return MatchOperand_ParseFail;
3719   }
3720   int Val = CE->getValue();
3721 
3722   // Check for and consume the closing '}'
3723   if (Parser.getTok().isNot(AsmToken::RCurly))
3724     return MatchOperand_ParseFail;
3725   SMLoc E = Parser.getTok().getEndLoc();
3726   Parser.Lex(); // Eat the '}'
3727 
3728   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3729   return MatchOperand_Success;
3730 }
3731 
3732 // For register list parsing, we need to map from raw GPR register numbering
3733 // to the enumeration values. The enumeration values aren't sorted by
3734 // register number due to our using "sp", "lr" and "pc" as canonical names.
3735 static unsigned getNextRegister(unsigned Reg) {
3736   // If this is a GPR, we need to do it manually, otherwise we can rely
3737   // on the sort ordering of the enumeration since the other reg-classes
3738   // are sane.
3739   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3740     return Reg + 1;
3741   switch(Reg) {
3742   default: llvm_unreachable("Invalid GPR number!");
3743   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3744   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3745   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3746   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3747   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3748   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3749   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3750   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3751   }
3752 }
3753 
3754 /// Parse a register list.
3755 bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
3756   MCAsmParser &Parser = getParser();
3757   if (Parser.getTok().isNot(AsmToken::LCurly))
3758     return TokError("Token is not a Left Curly Brace");
3759   SMLoc S = Parser.getTok().getLoc();
3760   Parser.Lex(); // Eat '{' token.
3761   SMLoc RegLoc = Parser.getTok().getLoc();
3762 
3763   // Check the first register in the list to see what register class
3764   // this is a list of.
3765   int Reg = tryParseRegister();
3766   if (Reg == -1)
3767     return Error(RegLoc, "register expected");
3768 
3769   // The reglist instructions have at most 16 registers, so reserve
3770   // space for that many.
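  // Each entry pairs the register's hardware encoding with the register so
  // CreateRegList can sort the list by encoding value.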
3771   int EReg = 0;
3772   SmallVector<std::pair<unsigned, unsigned>, 16> Registers;
3773 
3774   // Allow Q regs and just interpret them as the two D sub-registers.
3775   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3776     Reg = getDRegFromQReg(Reg);
3777     EReg = MRI->getEncodingValue(Reg);
3778     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3779     ++Reg;
3780   }
3781   const MCRegisterClass *RC;
3782   if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3783     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
3784   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
3785     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
3786   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
3787     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
3788   else
3789     return Error(RegLoc, "invalid register in register list");
3790 
3791   // Store the register.
3792   EReg = MRI->getEncodingValue(Reg);
3793   Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3794 
3795   // This starts immediately after the first register token in the list,
3796   // so we can see either a comma or a minus (range separator) as a legal
3797   // next token.
3798   while (Parser.getTok().is(AsmToken::Comma) ||
3799          Parser.getTok().is(AsmToken::Minus)) {
3800     if (Parser.getTok().is(AsmToken::Minus)) {
3801       Parser.Lex(); // Eat the minus.
3802       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3803       int EndReg = tryParseRegister();
3804       if (EndReg == -1)
3805         return Error(AfterMinusLoc, "register expected");
3806       // Allow Q regs and just interpret them as the two D sub-registers.
3807       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3808         EndReg = getDRegFromQReg(EndReg) + 1;
3809       // If the register is the same as the start reg, there's nothing
3810       // more to do.
3811       if (Reg == EndReg)
3812         continue;
3813       // The register must be in the same register class as the first.
3814       if (!RC->contains(EndReg))
3815         return Error(AfterMinusLoc, "invalid register in register list");
3816       // Ranges must go from low to high.
3817       if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
3818         return Error(AfterMinusLoc, "bad range in register list");
3819 
3820       // Add all the registers in the range to the register list.
3821       while (Reg != EndReg) {
3822         Reg = getNextRegister(Reg);
3823         EReg = MRI->getEncodingValue(Reg);
3824         Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3825       }
3826       continue;
3827     }
3828     Parser.Lex(); // Eat the comma.
3829     RegLoc = Parser.getTok().getLoc();
3830     int OldReg = Reg;
3831     const AsmToken RegTok = Parser.getTok();
3832     Reg = tryParseRegister();
3833     if (Reg == -1)
3834       return Error(RegLoc, "register expected");
3835     // Allow Q regs and just interpret them as the two D sub-registers.
3836     bool isQReg = false;
3837     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3838       Reg = getDRegFromQReg(Reg);
3839       isQReg = true;
3840     }
3841     // The register must be in the same register class as the first.
3842     if (!RC->contains(Reg))
3843       return Error(RegLoc, "invalid register in register list");
3844     // List must be monotonically increasing.
3845     if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
3846       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3847         Warning(RegLoc, "register list not in ascending order");
3848       else
3849         return Error(RegLoc, "register list not in ascending order");
3850     }
3851     if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
3852       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
3853               ") in register list");
3854       continue;
3855     }
3856     // VFP register lists must also be contiguous.
3857     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
3858         Reg != OldReg + 1)
3859       return Error(RegLoc, "non-contiguous register range");
3860     EReg = MRI->getEncodingValue(Reg);
3861     Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3862     if (isQReg) {
3863       EReg = MRI->getEncodingValue(++Reg);
3864       Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
3865     }
3866   }
3867 
3868   if (Parser.getTok().isNot(AsmToken::RCurly))
3869     return Error(Parser.getTok().getLoc(), "'}' expected");
3870   SMLoc E = Parser.getTok().getEndLoc();
3871   Parser.Lex(); // Eat '}' token.
3872 
3873   // Push the register list operand.
3874   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
3875 
3876   // The ARM system instruction variants for LDM/STM have a '^' token here.
3877   if (Parser.getTok().is(AsmToken::Caret)) {
3878     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
3879     Parser.Lex(); // Eat '^' token.
3880   }
3881 
3882   return false;
3883 }
3884 
3885 // Helper function to parse the lane index for vector lists.
3886 OperandMatchResultTy ARMAsmParser::
3887 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3888   MCAsmParser &Parser = getParser();
3889   Index = 0; // Always return a defined index value.
3890   if (Parser.getTok().is(AsmToken::LBrac)) {
3891     Parser.Lex(); // Eat the '['.
3892     if (Parser.getTok().is(AsmToken::RBrac)) {
3893       // "Dn[]" is the 'all lanes' syntax.
3894       LaneKind = AllLanes;
3895       EndLoc = Parser.getTok().getEndLoc();
3896       Parser.Lex(); // Eat the ']'.
3897       return MatchOperand_Success;
3898     }
3899 
    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assembly puts one in, and it's friendly to accept that.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat the '#'.
3904 
3905     const MCExpr *LaneIndex;
3906     SMLoc Loc = Parser.getTok().getLoc();
3907     if (getParser().parseExpression(LaneIndex)) {
3908       Error(Loc, "illegal expression");
3909       return MatchOperand_ParseFail;
3910     }
3911     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3912     if (!CE) {
3913       Error(Loc, "lane index must be empty or an integer");
3914       return MatchOperand_ParseFail;
3915     }
3916     if (Parser.getTok().isNot(AsmToken::RBrac)) {
3917       Error(Parser.getTok().getLoc(), "']' expected");
3918       return MatchOperand_ParseFail;
3919     }
3920     EndLoc = Parser.getTok().getEndLoc();
3921     Parser.Lex(); // Eat the ']'.
3922     int64_t Val = CE->getValue();
3923 
3924     // FIXME: Make this range check context sensitive for .8, .16, .32.
3925     if (Val < 0 || Val > 7) {
3926       Error(Parser.getTok().getLoc(), "lane index out of range");
3927       return MatchOperand_ParseFail;
3928     }
3929     Index = Val;
3930     LaneKind = IndexedLane;
3931     return MatchOperand_Success;
3932   }
3933   LaneKind = NoLanes;
3934   return MatchOperand_Success;
3935 }
3936 
3937 // parse a vector register list
3938 OperandMatchResultTy
3939 ARMAsmParser::parseVectorList(OperandVector &Operands) {
3940   MCAsmParser &Parser = getParser();
3941   VectorLaneTy LaneKind;
3942   unsigned LaneIndex;
3943   SMLoc S = Parser.getTok().getLoc();
3944   // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
3946   // respectively.
3947   if (Parser.getTok().is(AsmToken::Identifier)) {
3948     SMLoc E = Parser.getTok().getEndLoc();
3949     int Reg = tryParseRegister();
3950     if (Reg == -1)
3951       return MatchOperand_NoMatch;
3952     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3953       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3954       if (Res != MatchOperand_Success)
3955         return Res;
3956       switch (LaneKind) {
3957       case NoLanes:
3958         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3959         break;
3960       case AllLanes:
3961         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3962                                                                 S, E));
3963         break;
3964       case IndexedLane:
3965         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3966                                                                LaneIndex,
3967                                                                false, S, E));
3968         break;
3969       }
3970       return MatchOperand_Success;
3971     }
3972     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3973       Reg = getDRegFromQReg(Reg);
3974       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3975       if (Res != MatchOperand_Success)
3976         return Res;
3977       switch (LaneKind) {
3978       case NoLanes:
3979         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3980                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3981         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3982         break;
3983       case AllLanes:
3984         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3985                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3986         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3987                                                                 S, E));
3988         break;
3989       case IndexedLane:
3990         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3991                                                                LaneIndex,
3992                                                                false, S, E));
3993         break;
3994       }
3995       return MatchOperand_Success;
3996     }
3997     Error(S, "vector register expected");
3998     return MatchOperand_ParseFail;
3999   }
4000 
4001   if (Parser.getTok().isNot(AsmToken::LCurly))
4002     return MatchOperand_NoMatch;
4003 
4004   Parser.Lex(); // Eat '{' token.
4005   SMLoc RegLoc = Parser.getTok().getLoc();
4006 
4007   int Reg = tryParseRegister();
4008   if (Reg == -1) {
4009     Error(RegLoc, "register expected");
4010     return MatchOperand_ParseFail;
4011   }
4012   unsigned Count = 1;
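  // Spacing tracks the stride of the list: 0 = not yet known, 1 = single
  // spaced, 2 = double spaced.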
4013   int Spacing = 0;
4014   unsigned FirstReg = Reg;
4015   // The list is of D registers, but we also allow Q regs and just interpret
4016   // them as the two D sub-registers.
4017   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4018     FirstReg = Reg = getDRegFromQReg(Reg);
4019     Spacing = 1; // double-spacing requires explicit D registers, otherwise
4020                  // it's ambiguous with four-register single spaced.
4021     ++Reg;
4022     ++Count;
4023   }
4024 
4025   SMLoc E;
4026   if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
4027     return MatchOperand_ParseFail;
4028 
4029   while (Parser.getTok().is(AsmToken::Comma) ||
4030          Parser.getTok().is(AsmToken::Minus)) {
4031     if (Parser.getTok().is(AsmToken::Minus)) {
4032       if (!Spacing)
4033         Spacing = 1; // Register range implies a single spaced list.
4034       else if (Spacing == 2) {
4035         Error(Parser.getTok().getLoc(),
4036               "sequential registers in double spaced list");
4037         return MatchOperand_ParseFail;
4038       }
4039       Parser.Lex(); // Eat the minus.
4040       SMLoc AfterMinusLoc = Parser.getTok().getLoc();
4041       int EndReg = tryParseRegister();
4042       if (EndReg == -1) {
4043         Error(AfterMinusLoc, "register expected");
4044         return MatchOperand_ParseFail;
4045       }
4046       // Allow Q regs and just interpret them as the two D sub-registers.
4047       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
4048         EndReg = getDRegFromQReg(EndReg) + 1;
4049       // If the register is the same as the start reg, there's nothing
4050       // more to do.
4051       if (Reg == EndReg)
4052         continue;
4053       // The register must be in the same register class as the first.
4054       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
4055         Error(AfterMinusLoc, "invalid register in register list");
4056         return MatchOperand_ParseFail;
4057       }
4058       // Ranges must go from low to high.
4059       if (Reg > EndReg) {
4060         Error(AfterMinusLoc, "bad range in register list");
4061         return MatchOperand_ParseFail;
4062       }
4063       // Parse the lane specifier if present.
4064       VectorLaneTy NextLaneKind;
4065       unsigned NextLaneIndex;
4066       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4067           MatchOperand_Success)
4068         return MatchOperand_ParseFail;
4069       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4070         Error(AfterMinusLoc, "mismatched lane index in register list");
4071         return MatchOperand_ParseFail;
4072       }
4073 
4074       // Add all the registers in the range to the register list.
4075       Count += EndReg - Reg;
4076       Reg = EndReg;
4077       continue;
4078     }
4079     Parser.Lex(); // Eat the comma.
4080     RegLoc = Parser.getTok().getLoc();
4081     int OldReg = Reg;
4082     Reg = tryParseRegister();
4083     if (Reg == -1) {
4084       Error(RegLoc, "register expected");
4085       return MatchOperand_ParseFail;
4086     }
4087     // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
4089     // VFP register classes have the enum sorted properly.
4090     //
4091     // The list is of D registers, but we also allow Q regs and just interpret
4092     // them as the two D sub-registers.
4093     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
4094       if (!Spacing)
4095         Spacing = 1; // Register range implies a single spaced list.
4096       else if (Spacing == 2) {
4097         Error(RegLoc,
4098               "invalid register in double-spaced list (must be 'D' register')");
4099         return MatchOperand_ParseFail;
4100       }
4101       Reg = getDRegFromQReg(Reg);
4102       if (Reg != OldReg + 1) {
4103         Error(RegLoc, "non-contiguous register range");
4104         return MatchOperand_ParseFail;
4105       }
4106       ++Reg;
4107       Count += 2;
4108       // Parse the lane specifier if present.
4109       VectorLaneTy NextLaneKind;
4110       unsigned NextLaneIndex;
4111       SMLoc LaneLoc = Parser.getTok().getLoc();
4112       if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
4113           MatchOperand_Success)
4114         return MatchOperand_ParseFail;
4115       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4116         Error(LaneLoc, "mismatched lane index in register list");
4117         return MatchOperand_ParseFail;
4118       }
4119       continue;
4120     }
4121     // Normal D register.
4122     // Figure out the register spacing (single or double) of the list if
4123     // we don't know it already.
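    // A stride of two D registers (e.g. d0, d2) implies a double-spaced list.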
4124     if (!Spacing)
4125       Spacing = 1 + (Reg == OldReg + 2);
4126 
4127     // Just check that it's contiguous and keep going.
4128     if (Reg != OldReg + Spacing) {
4129       Error(RegLoc, "non-contiguous register range");
4130       return MatchOperand_ParseFail;
4131     }
4132     ++Count;
4133     // Parse the lane specifier if present.
4134     VectorLaneTy NextLaneKind;
4135     unsigned NextLaneIndex;
4136     SMLoc EndLoc = Parser.getTok().getLoc();
4137     if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
4138       return MatchOperand_ParseFail;
4139     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
4140       Error(EndLoc, "mismatched lane index in register list");
4141       return MatchOperand_ParseFail;
4142     }
4143   }
4144 
4145   if (Parser.getTok().isNot(AsmToken::RCurly)) {
4146     Error(Parser.getTok().getLoc(), "'}' expected");
4147     return MatchOperand_ParseFail;
4148   }
4149   E = Parser.getTok().getEndLoc();
4150   Parser.Lex(); // Eat '}' token.
4151 
4152   switch (LaneKind) {
4153   case NoLanes:
    // Convert two-register operands to the composite register classes.
4156     if (Count == 2) {
4157       const MCRegisterClass *RC = (Spacing == 1) ?
4158         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4159         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4160       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4161     }
4162     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
4163                                                     (Spacing == 2), S, E));
4164     break;
4165   case AllLanes:
    // Convert two-register operands to the composite register classes.
4168     if (Count == 2) {
4169       const MCRegisterClass *RC = (Spacing == 1) ?
4170         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
4171         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
4172       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
4173     }
4174     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
4175                                                             (Spacing == 2),
4176                                                             S, E));
4177     break;
4178   case IndexedLane:
4179     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
4180                                                            LaneIndex,
4181                                                            (Spacing == 2),
4182                                                            S, E));
4183     break;
4184   }
4185   return MatchOperand_Success;
4186 }
4187 
4188 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4189 OperandMatchResultTy
4190 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4191   MCAsmParser &Parser = getParser();
4192   SMLoc S = Parser.getTok().getLoc();
4193   const AsmToken &Tok = Parser.getTok();
4194   unsigned Opt;
4195 
4196   if (Tok.is(AsmToken::Identifier)) {
4197     StringRef OptStr = Tok.getString();
4198 
    Opt = StringSwitch<unsigned>(OptStr.lower())
4200       .Case("sy",    ARM_MB::SY)
4201       .Case("st",    ARM_MB::ST)
4202       .Case("ld",    ARM_MB::LD)
4203       .Case("sh",    ARM_MB::ISH)
4204       .Case("ish",   ARM_MB::ISH)
4205       .Case("shst",  ARM_MB::ISHST)
4206       .Case("ishst", ARM_MB::ISHST)
4207       .Case("ishld", ARM_MB::ISHLD)
4208       .Case("nsh",   ARM_MB::NSH)
4209       .Case("un",    ARM_MB::NSH)
4210       .Case("nshst", ARM_MB::NSHST)
4211       .Case("nshld", ARM_MB::NSHLD)
4212       .Case("unst",  ARM_MB::NSHST)
4213       .Case("osh",   ARM_MB::OSH)
4214       .Case("oshst", ARM_MB::OSHST)
4215       .Case("oshld", ARM_MB::OSHLD)
4216       .Default(~0U);
4217 
4218     // ishld, oshld, nshld and ld are only available from ARMv8.
4219     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4220                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4221       Opt = ~0U;
4222 
4223     if (Opt == ~0U)
4224       return MatchOperand_NoMatch;
4225 
4226     Parser.Lex(); // Eat identifier token.
4227   } else if (Tok.is(AsmToken::Hash) ||
4228              Tok.is(AsmToken::Dollar) ||
4229              Tok.is(AsmToken::Integer)) {
4230     if (Parser.getTok().isNot(AsmToken::Integer))
4231       Parser.Lex(); // Eat '#' or '$'.
4232     SMLoc Loc = Parser.getTok().getLoc();
4233 
4234     const MCExpr *MemBarrierID;
4235     if (getParser().parseExpression(MemBarrierID)) {
4236       Error(Loc, "illegal expression");
4237       return MatchOperand_ParseFail;
4238     }
4239 
4240     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4241     if (!CE) {
4242       Error(Loc, "constant expression expected");
4243       return MatchOperand_ParseFail;
4244     }
4245 
4246     int Val = CE->getValue();
4247     if (Val & ~0xf) {
4248       Error(Loc, "immediate value out of range");
4249       return MatchOperand_ParseFail;
4250     }
4251 
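    // Map the raw value directly onto the 4-bit barrier option field;
    // RESERVED_0 is encoding zero.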
4252     Opt = ARM_MB::RESERVED_0 + Val;
4253   } else
4254     return MatchOperand_ParseFail;
4255 
4256   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4257   return MatchOperand_Success;
4258 }
4259 
4260 OperandMatchResultTy
4261 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) {
4262   MCAsmParser &Parser = getParser();
4263   SMLoc S = Parser.getTok().getLoc();
4264   const AsmToken &Tok = Parser.getTok();
4265 
4266   if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;
4268 
4269   if (!Tok.getString().equals_lower("csync"))
4270     return MatchOperand_NoMatch;
4271 
4272   Parser.Lex(); // Eat identifier token.
4273 
4274   Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S));
4275   return MatchOperand_Success;
4276 }
4277 
4278 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4279 OperandMatchResultTy
4280 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4281   MCAsmParser &Parser = getParser();
4282   SMLoc S = Parser.getTok().getLoc();
4283   const AsmToken &Tok = Parser.getTok();
4284   unsigned Opt;
4285 
4286   if (Tok.is(AsmToken::Identifier)) {
4287     StringRef OptStr = Tok.getString();
4288 
4289     if (OptStr.equals_lower("sy"))
4290       Opt = ARM_ISB::SY;
4291     else
4292       return MatchOperand_NoMatch;
4293 
4294     Parser.Lex(); // Eat identifier token.
4295   } else if (Tok.is(AsmToken::Hash) ||
4296              Tok.is(AsmToken::Dollar) ||
4297              Tok.is(AsmToken::Integer)) {
4298     if (Parser.getTok().isNot(AsmToken::Integer))
4299       Parser.Lex(); // Eat '#' or '$'.
4300     SMLoc Loc = Parser.getTok().getLoc();
4301 
4302     const MCExpr *ISBarrierID;
4303     if (getParser().parseExpression(ISBarrierID)) {
4304       Error(Loc, "illegal expression");
4305       return MatchOperand_ParseFail;
4306     }
4307 
4308     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4309     if (!CE) {
4310       Error(Loc, "constant expression expected");
4311       return MatchOperand_ParseFail;
4312     }
4313 
4314     int Val = CE->getValue();
4315     if (Val & ~0xf) {
4316       Error(Loc, "immediate value out of range");
4317       return MatchOperand_ParseFail;
4318     }
4319 
4320     Opt = ARM_ISB::RESERVED_0 + Val;
4321   } else
4322     return MatchOperand_ParseFail;
4323 
4324   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4325           (ARM_ISB::InstSyncBOpt)Opt, S));
4326   return MatchOperand_Success;
4327 }
4328 
4329 
4330 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4331 OperandMatchResultTy
4332 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4333   MCAsmParser &Parser = getParser();
4334   SMLoc S = Parser.getTok().getLoc();
4335   const AsmToken &Tok = Parser.getTok();
4336   if (!Tok.is(AsmToken::Identifier))
4337     return MatchOperand_NoMatch;
4338   StringRef IFlagsStr = Tok.getString();
4339 
4340   // An iflags string of "none" is interpreted to mean that none of the AIF
4341   // bits are set.  Not a terribly useful instruction, but a valid encoding.
4342   unsigned IFlags = 0;
4343   if (IFlagsStr != "none") {
    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4345       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4346         .Case("a", ARM_PROC::A)
4347         .Case("i", ARM_PROC::I)
4348         .Case("f", ARM_PROC::F)
4349         .Default(~0U);
4350 
      // If some specific iflag is already set, it means that some letter is
      // present more than once; this is not acceptable.
4353       if (Flag == ~0U || (IFlags & Flag))
4354         return MatchOperand_NoMatch;
4355 
4356       IFlags |= Flag;
4357     }
4358   }
4359 
4360   Parser.Lex(); // Eat identifier token.
4361   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4362   return MatchOperand_Success;
4363 }
4364 
4365 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
4366 OperandMatchResultTy
4367 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
4368   MCAsmParser &Parser = getParser();
4369   SMLoc S = Parser.getTok().getLoc();
4370   const AsmToken &Tok = Parser.getTok();
4371 
4372   if (Tok.is(AsmToken::Integer)) {
4373     int64_t Val = Tok.getIntVal();
4374     if (Val > 255 || Val < 0) {
4375       return MatchOperand_NoMatch;
4376     }
4377     unsigned SYSmvalue = Val & 0xFF;
4378     Parser.Lex();
4379     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4380     return MatchOperand_Success;
4381   }
4382 
4383   if (!Tok.is(AsmToken::Identifier))
4384     return MatchOperand_NoMatch;
4385   StringRef Mask = Tok.getString();
4386 
4387   if (isMClass()) {
4388     auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
4389     if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
4390       return MatchOperand_NoMatch;
4391 
4392     unsigned SYSmvalue = TheReg->Encoding & 0xFFF;
4393 
4394     Parser.Lex(); // Eat identifier token.
4395     Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
4396     return MatchOperand_Success;
4397   }
4398 
4399   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
4400   size_t Start = 0, Next = Mask.find('_');
4401   StringRef Flags = "";
4402   std::string SpecReg = Mask.slice(Start, Next).lower();
4403   if (Next != StringRef::npos)
4404     Flags = Mask.slice(Next+1, Mask.size());
4405 
4406   // FlagsVal contains the complete mask:
4407   // 3-0: Mask
4408   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4409   unsigned FlagsVal = 0;
4410 
4411   if (SpecReg == "apsr") {
4412     FlagsVal = StringSwitch<unsigned>(Flags)
4413     .Case("nzcvq",  0x8) // same as CPSR_f
4414     .Case("g",      0x4) // same as CPSR_s
4415     .Case("nzcvqg", 0xc) // same as CPSR_fs
4416     .Default(~0U);
4417 
4418     if (FlagsVal == ~0U) {
4419       if (!Flags.empty())
4420         return MatchOperand_NoMatch;
4421       else
4422         FlagsVal = 8; // No flag
4423     }
4424   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
4425     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
4426     if (Flags == "all" || Flags == "")
4427       Flags = "fc";
4428     for (int i = 0, e = Flags.size(); i != e; ++i) {
4429       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
4430       .Case("c", 1)
4431       .Case("x", 2)
4432       .Case("s", 4)
4433       .Case("f", 8)
4434       .Default(~0U);
4435 
      // If some specific flag is already set, it means that some letter is
      // present more than once; this is not acceptable.
4438       if (Flag == ~0U || (FlagsVal & Flag))
4439         return MatchOperand_NoMatch;
4440       FlagsVal |= Flag;
4441     }
4442   } else // No match for special register.
4443     return MatchOperand_NoMatch;
4444 
4445   // Special register without flags is NOT equivalent to "fc" flags.
4446   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
4447   // two lines would enable gas compatibility at the expense of breaking
4448   // round-tripping.
4449   //
4450   // if (!FlagsVal)
4451   //  FlagsVal = 0x9;
4452 
4453   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
4454   if (SpecReg == "spsr")
4455     FlagsVal |= 16;
4456 
4457   Parser.Lex(); // Eat identifier token.
4458   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
4459   return MatchOperand_Success;
4460 }
4461 
4462 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4463 /// use in the MRS/MSR instructions added to support virtualization.
4464 OperandMatchResultTy
4465 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4466   MCAsmParser &Parser = getParser();
4467   SMLoc S = Parser.getTok().getLoc();
4468   const AsmToken &Tok = Parser.getTok();
4469   if (!Tok.is(AsmToken::Identifier))
4470     return MatchOperand_NoMatch;
4471   StringRef RegName = Tok.getString();
4472 
4473   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4474   if (!TheReg)
4475     return MatchOperand_NoMatch;
4476   unsigned Encoding = TheReg->Encoding;
4477 
4478   Parser.Lex(); // Eat identifier token.
4479   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4480   return MatchOperand_Success;
4481 }
4482 
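/// parsePKHImm - Parse the shift immediate operand for the PKH instructions,
/// e.g. the "lsl #8" in "pkhbt r0, r1, r2, lsl #8". Op names the expected
/// shift operator ("lsl" or "asr") and [Low, High] gives the legal range for
/// the shift amount.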
4483 OperandMatchResultTy
4484 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4485                           int High) {
4486   MCAsmParser &Parser = getParser();
4487   const AsmToken &Tok = Parser.getTok();
4488   if (Tok.isNot(AsmToken::Identifier)) {
4489     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4490     return MatchOperand_ParseFail;
4491   }
4492   StringRef ShiftName = Tok.getString();
4493   std::string LowerOp = Op.lower();
4494   std::string UpperOp = Op.upper();
4495   if (ShiftName != LowerOp && ShiftName != UpperOp) {
4496     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4497     return MatchOperand_ParseFail;
4498   }
4499   Parser.Lex(); // Eat shift type token.
4500 
4501   // There must be a '#' and a shift amount.
4502   if (Parser.getTok().isNot(AsmToken::Hash) &&
4503       Parser.getTok().isNot(AsmToken::Dollar)) {
4504     Error(Parser.getTok().getLoc(), "'#' expected");
4505     return MatchOperand_ParseFail;
4506   }
4507   Parser.Lex(); // Eat hash token.
4508 
4509   const MCExpr *ShiftAmount;
4510   SMLoc Loc = Parser.getTok().getLoc();
4511   SMLoc EndLoc;
4512   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4513     Error(Loc, "illegal expression");
4514     return MatchOperand_ParseFail;
4515   }
4516   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4517   if (!CE) {
4518     Error(Loc, "constant expression expected");
4519     return MatchOperand_ParseFail;
4520   }
4521   int Val = CE->getValue();
4522   if (Val < Low || Val > High) {
4523     Error(Loc, "immediate value out of range");
4524     return MatchOperand_ParseFail;
4525   }
4526 
4527   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4528 
4529   return MatchOperand_Success;
4530 }
4531 
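/// parseSetEndImm - Parse the endianness operand for the SETEND instruction,
/// e.g. "setend be". Legal values are "be" (encoded as 1) and "le" (encoded
/// as 0).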
4532 OperandMatchResultTy
4533 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4534   MCAsmParser &Parser = getParser();
4535   const AsmToken &Tok = Parser.getTok();
4536   SMLoc S = Tok.getLoc();
4537   if (Tok.isNot(AsmToken::Identifier)) {
4538     Error(S, "'be' or 'le' operand expected");
4539     return MatchOperand_ParseFail;
4540   }
4541   int Val = StringSwitch<int>(Tok.getString().lower())
4542     .Case("be", 1)
4543     .Case("le", 0)
4544     .Default(-1);
4545   Parser.Lex(); // Eat the token.
4546 
4547   if (Val == -1) {
4548     Error(S, "'be' or 'le' operand expected");
4549     return MatchOperand_ParseFail;
4550   }
4551   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4552                                                                   getContext()),
4553                                            S, Tok.getEndLoc()));
4554   return MatchOperand_Success;
4555 }
4556 
4557 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4558 /// instructions. Legal values are:
4559 ///     lsl #n  'n' in [0,31]
4560 ///     asr #n  'n' in [1,32]
4561 ///             n == 32 encoded as n == 0.
4562 OperandMatchResultTy
4563 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4564   MCAsmParser &Parser = getParser();
4565   const AsmToken &Tok = Parser.getTok();
4566   SMLoc S = Tok.getLoc();
4567   if (Tok.isNot(AsmToken::Identifier)) {
4568     Error(S, "shift operator 'asr' or 'lsl' expected");
4569     return MatchOperand_ParseFail;
4570   }
4571   StringRef ShiftName = Tok.getString();
4572   bool isASR;
4573   if (ShiftName == "lsl" || ShiftName == "LSL")
4574     isASR = false;
4575   else if (ShiftName == "asr" || ShiftName == "ASR")
4576     isASR = true;
4577   else {
4578     Error(S, "shift operator 'asr' or 'lsl' expected");
4579     return MatchOperand_ParseFail;
4580   }
4581   Parser.Lex(); // Eat the operator.
4582 
4583   // A '#' and a shift amount.
4584   if (Parser.getTok().isNot(AsmToken::Hash) &&
4585       Parser.getTok().isNot(AsmToken::Dollar)) {
4586     Error(Parser.getTok().getLoc(), "'#' expected");
4587     return MatchOperand_ParseFail;
4588   }
4589   Parser.Lex(); // Eat hash token.
4590   SMLoc ExLoc = Parser.getTok().getLoc();
4591 
4592   const MCExpr *ShiftAmount;
4593   SMLoc EndLoc;
4594   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4595     Error(ExLoc, "malformed shift expression");
4596     return MatchOperand_ParseFail;
4597   }
4598   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4599   if (!CE) {
4600     Error(ExLoc, "shift amount must be an immediate");
4601     return MatchOperand_ParseFail;
4602   }
4603 
4604   int64_t Val = CE->getValue();
4605   if (isASR) {
4606     // Shift amount must be in [1,32]
4607     if (Val < 1 || Val > 32) {
4608       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4609       return MatchOperand_ParseFail;
4610     }
4611     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4612     if (isThumb() && Val == 32) {
4613       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4614       return MatchOperand_ParseFail;
4615     }
4616     if (Val == 32) Val = 0;
4617   } else {
    // Shift amount must be in [0,31]
    if (Val < 0 || Val > 31) {
      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
4621       return MatchOperand_ParseFail;
4622     }
4623   }
4624 
4625   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4626 
4627   return MatchOperand_Success;
4628 }
4629 
4630 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4631 /// of instructions. Legal values are:
4632 ///     ror #n  'n' in {0, 8, 16, 24}
4633 OperandMatchResultTy
4634 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4635   MCAsmParser &Parser = getParser();
4636   const AsmToken &Tok = Parser.getTok();
4637   SMLoc S = Tok.getLoc();
4638   if (Tok.isNot(AsmToken::Identifier))
4639     return MatchOperand_NoMatch;
4640   StringRef ShiftName = Tok.getString();
4641   if (ShiftName != "ror" && ShiftName != "ROR")
4642     return MatchOperand_NoMatch;
4643   Parser.Lex(); // Eat the operator.
4644 
4645   // A '#' and a rotate amount.
4646   if (Parser.getTok().isNot(AsmToken::Hash) &&
4647       Parser.getTok().isNot(AsmToken::Dollar)) {
4648     Error(Parser.getTok().getLoc(), "'#' expected");
4649     return MatchOperand_ParseFail;
4650   }
4651   Parser.Lex(); // Eat hash token.
4652   SMLoc ExLoc = Parser.getTok().getLoc();
4653 
4654   const MCExpr *ShiftAmount;
4655   SMLoc EndLoc;
4656   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4657     Error(ExLoc, "malformed rotate expression");
4658     return MatchOperand_ParseFail;
4659   }
4660   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4661   if (!CE) {
4662     Error(ExLoc, "rotate amount must be an immediate");
4663     return MatchOperand_ParseFail;
4664   }
4665 
4666   int64_t Val = CE->getValue();
  // The rotate amount must be in {0, 8, 16, 24} (0 is an undocumented
  // extension); normally, zero is represented in asm by omitting the rotate
  // operand entirely.
4670   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4671     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4672     return MatchOperand_ParseFail;
4673   }
4674 
4675   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4676 
4677   return MatchOperand_Success;
4678 }
4679 
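/// parseModImm - Parse a modified immediate operand, i.e. an 8-bit value
/// rotated right by an even amount. It is accepted either as a single
/// encodable constant (e.g. "mov r0, #0xff0000") or as an explicit
/// "#bits, #rot" pair (e.g. "mov r0, #255, #16").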
4680 OperandMatchResultTy
4681 ARMAsmParser::parseModImm(OperandVector &Operands) {
4682   MCAsmParser &Parser = getParser();
4683   MCAsmLexer &Lexer = getLexer();
4684   int64_t Imm1, Imm2;
4685 
4686   SMLoc S = Parser.getTok().getLoc();
4687 
4688   // 1) A mod_imm operand can appear in the place of a register name:
4689   //   add r0, #mod_imm
4690   //   add r0, r0, #mod_imm
4691   // to correctly handle the latter, we bail out as soon as we see an
4692   // identifier.
4693   //
4694   // 2) Similarly, we do not want to parse into complex operands:
4695   //   mov r0, #mod_imm
4696   //   mov r0, :lower16:(_foo)
4697   if (Parser.getTok().is(AsmToken::Identifier) ||
4698       Parser.getTok().is(AsmToken::Colon))
4699     return MatchOperand_NoMatch;
4700 
4701   // Hash (dollar) is optional as per the ARMARM
4702   if (Parser.getTok().is(AsmToken::Hash) ||
4703       Parser.getTok().is(AsmToken::Dollar)) {
4704     // Avoid parsing into complex operands (#:)
4705     if (Lexer.peekTok().is(AsmToken::Colon))
4706       return MatchOperand_NoMatch;
4707 
4708     // Eat the hash (dollar)
4709     Parser.Lex();
4710   }
4711 
4712   SMLoc Sx1, Ex1;
4713   Sx1 = Parser.getTok().getLoc();
4714   const MCExpr *Imm1Exp;
4715   if (getParser().parseExpression(Imm1Exp, Ex1)) {
4716     Error(Sx1, "malformed expression");
4717     return MatchOperand_ParseFail;
4718   }
4719 
4720   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4721 
4722   if (CE) {
    // Immediate must fit within 32 bits.
4724     Imm1 = CE->getValue();
4725     int Enc = ARM_AM::getSOImmVal(Imm1);
4726     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4727       // We have a match!
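      // getSOImmVal returns the 12-bit A1 encoding: the low 8 bits hold the
      // value and bits 8-11 hold rot/2, so shifting right by 7 recovers the
      // rotation amount.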
4728       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4729                                                   (Enc & 0xF00) >> 7,
4730                                                   Sx1, Ex1));
4731       return MatchOperand_Success;
4732     }
4733 
4734     // We have parsed an immediate which is not for us, fallback to a plain
4735     // immediate. This can happen for instruction aliases. For an example,
4736     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4737     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4738     // instruction with a mod_imm operand. The alias is defined such that the
4739     // parser method is shared, that's why we have to do this here.
4740     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4741       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4742       return MatchOperand_Success;
4743     }
4744   } else {
4745     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4746     // MCFixup). Fallback to a plain immediate.
4747     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4748     return MatchOperand_Success;
4749   }
4750 
4751   // From this point onward, we expect the input to be a (#bits, #rot) pair
4752   if (Parser.getTok().isNot(AsmToken::Comma)) {
4753     Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4754     return MatchOperand_ParseFail;
4755   }
4756 
4757   if (Imm1 & ~0xFF) {
    Error(Sx1, "immediate operand must be a number in the range [0, 255]");
4759     return MatchOperand_ParseFail;
4760   }
4761 
4762   // Eat the comma
4763   Parser.Lex();
4764 
4765   // Repeat for #rot
4766   SMLoc Sx2, Ex2;
4767   Sx2 = Parser.getTok().getLoc();
4768 
4769   // Eat the optional hash (dollar)
4770   if (Parser.getTok().is(AsmToken::Hash) ||
4771       Parser.getTok().is(AsmToken::Dollar))
4772     Parser.Lex();
4773 
4774   const MCExpr *Imm2Exp;
4775   if (getParser().parseExpression(Imm2Exp, Ex2)) {
4776     Error(Sx2, "malformed expression");
4777     return MatchOperand_ParseFail;
4778   }
4779 
4780   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4781 
4782   if (CE) {
4783     Imm2 = CE->getValue();
4784     if (!(Imm2 & ~0x1E)) {
4785       // We have a match!
4786       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4787       return MatchOperand_Success;
4788     }
    Error(Sx2, "immediate operand must be an even number in the range [0, 30]");
4790     return MatchOperand_ParseFail;
4791   } else {
4792     Error(Sx2, "constant expression expected");
4793     return MatchOperand_ParseFail;
4794   }
4795 }
4796 
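/// parseBitfield - Parse the "#lsb, #width" operand pair used by the bitfield
/// instructions (BFI, BFC, SBFX, UBFX), e.g. the "#8, #4" in "bfc r0, #8, #4".
/// The lsb must be in [0,31] and the width in [1,32-lsb].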
4797 OperandMatchResultTy
4798 ARMAsmParser::parseBitfield(OperandVector &Operands) {
4799   MCAsmParser &Parser = getParser();
4800   SMLoc S = Parser.getTok().getLoc();
4801   // The bitfield descriptor is really two operands, the LSB and the width.
4802   if (Parser.getTok().isNot(AsmToken::Hash) &&
4803       Parser.getTok().isNot(AsmToken::Dollar)) {
4804     Error(Parser.getTok().getLoc(), "'#' expected");
4805     return MatchOperand_ParseFail;
4806   }
4807   Parser.Lex(); // Eat hash token.
4808 
4809   const MCExpr *LSBExpr;
4810   SMLoc E = Parser.getTok().getLoc();
4811   if (getParser().parseExpression(LSBExpr)) {
4812     Error(E, "malformed immediate expression");
4813     return MatchOperand_ParseFail;
4814   }
4815   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
4816   if (!CE) {
4817     Error(E, "'lsb' operand must be an immediate");
4818     return MatchOperand_ParseFail;
4819   }
4820 
4821   int64_t LSB = CE->getValue();
4822   // The LSB must be in the range [0,31]
4823   if (LSB < 0 || LSB > 31) {
4824     Error(E, "'lsb' operand must be in the range [0,31]");
4825     return MatchOperand_ParseFail;
4826   }
4827   E = Parser.getTok().getLoc();
4828 
4829   // Expect another immediate operand.
4830   if (Parser.getTok().isNot(AsmToken::Comma)) {
4831     Error(Parser.getTok().getLoc(), "too few operands");
4832     return MatchOperand_ParseFail;
4833   }
  Parser.Lex(); // Eat comma token.
4835   if (Parser.getTok().isNot(AsmToken::Hash) &&
4836       Parser.getTok().isNot(AsmToken::Dollar)) {
4837     Error(Parser.getTok().getLoc(), "'#' expected");
4838     return MatchOperand_ParseFail;
4839   }
4840   Parser.Lex(); // Eat hash token.
4841 
4842   const MCExpr *WidthExpr;
4843   SMLoc EndLoc;
4844   if (getParser().parseExpression(WidthExpr, EndLoc)) {
4845     Error(E, "malformed immediate expression");
4846     return MatchOperand_ParseFail;
4847   }
4848   CE = dyn_cast<MCConstantExpr>(WidthExpr);
4849   if (!CE) {
4850     Error(E, "'width' operand must be an immediate");
4851     return MatchOperand_ParseFail;
4852   }
4853 
4854   int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
4856   if (Width < 1 || Width > 32 - LSB) {
4857     Error(E, "'width' operand must be in the range [1,32-lsb]");
4858     return MatchOperand_ParseFail;
4859   }
4860 
4861   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
4862 
4863   return MatchOperand_Success;
4864 }
4865 
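/// parsePostIdxReg - Parse a post-indexed register offset, optionally signed
/// and optionally shifted, e.g. the "r2, lsl #2" in
/// "ldr r0, [r1], r2, lsl #2".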
4866 OperandMatchResultTy
4867 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4868   // Check for a post-index addressing register operand. Specifically:
4869   // postidx_reg := '+' register {, shift}
4870   //              | '-' register {, shift}
4871   //              | register {, shift}
4872 
4873   // This method must return MatchOperand_NoMatch without consuming any tokens
4874   // in the case where there is no match, as other alternatives take other
4875   // parse methods.
4876   MCAsmParser &Parser = getParser();
4877   AsmToken Tok = Parser.getTok();
4878   SMLoc S = Tok.getLoc();
4879   bool haveEaten = false;
4880   bool isAdd = true;
4881   if (Tok.is(AsmToken::Plus)) {
4882     Parser.Lex(); // Eat the '+' token.
4883     haveEaten = true;
4884   } else if (Tok.is(AsmToken::Minus)) {
4885     Parser.Lex(); // Eat the '-' token.
4886     isAdd = false;
4887     haveEaten = true;
4888   }
4889 
4890   SMLoc E = Parser.getTok().getEndLoc();
4891   int Reg = tryParseRegister();
4892   if (Reg == -1) {
4893     if (!haveEaten)
4894       return MatchOperand_NoMatch;
4895     Error(Parser.getTok().getLoc(), "register expected");
4896     return MatchOperand_ParseFail;
4897   }
4898 
4899   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4900   unsigned ShiftImm = 0;
4901   if (Parser.getTok().is(AsmToken::Comma)) {
4902     Parser.Lex(); // Eat the ','.
4903     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4904       return MatchOperand_ParseFail;
4905 
4906     // FIXME: Only approximates end...may include intervening whitespace.
4907     E = Parser.getTok().getLoc();
4908   }
4909 
4910   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4911                                                   ShiftImm, S, E));
4912 
4913   return MatchOperand_Success;
4914 }
4915 
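/// parseAM3Offset - Parse an addressing mode 3 offset, used by instructions
/// such as LDRH, STRH and LDRD. The offset is either a register, optionally
/// signed, or an immediate, e.g. the "#8" in "ldrh r0, [r1], #8".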
4916 OperandMatchResultTy
4917 ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
4918   // Check for a post-index addressing register operand. Specifically:
4919   // am3offset := '+' register
4920   //              | '-' register
4921   //              | register
4922   //              | # imm
4923   //              | # + imm
4924   //              | # - imm
4925 
4926   // This method must return MatchOperand_NoMatch without consuming any tokens
4927   // in the case where there is no match, as other alternatives take other
4928   // parse methods.
4929   MCAsmParser &Parser = getParser();
4930   AsmToken Tok = Parser.getTok();
4931   SMLoc S = Tok.getLoc();
4932 
4933   // Do immediates first, as we always parse those if we have a '#'.
4934   if (Parser.getTok().is(AsmToken::Hash) ||
4935       Parser.getTok().is(AsmToken::Dollar)) {
4936     Parser.Lex(); // Eat '#' or '$'.
4937     // Explicitly look for a '-', as we need to encode negative zero
4938     // differently.
4939     bool isNegative = Parser.getTok().is(AsmToken::Minus);
4940     const MCExpr *Offset;
4941     SMLoc E;
4942     if (getParser().parseExpression(Offset, E))
4943       return MatchOperand_ParseFail;
4944     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4945     if (!CE) {
4946       Error(S, "constant expression expected");
4947       return MatchOperand_ParseFail;
4948     }
4949     // Negative zero is encoded as the flag value
4950     // std::numeric_limits<int32_t>::min().
4951     int32_t Val = CE->getValue();
4952     if (isNegative && Val == 0)
4953       Val = std::numeric_limits<int32_t>::min();
4954 
4955     Operands.push_back(
4956       ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));
4957 
4958     return MatchOperand_Success;
4959   }
4960 
4961   bool haveEaten = false;
4962   bool isAdd = true;
4963   if (Tok.is(AsmToken::Plus)) {
4964     Parser.Lex(); // Eat the '+' token.
4965     haveEaten = true;
4966   } else if (Tok.is(AsmToken::Minus)) {
4967     Parser.Lex(); // Eat the '-' token.
4968     isAdd = false;
4969     haveEaten = true;
4970   }
4971 
4972   Tok = Parser.getTok();
4973   int Reg = tryParseRegister();
4974   if (Reg == -1) {
4975     if (!haveEaten)
4976       return MatchOperand_NoMatch;
4977     Error(Tok.getLoc(), "register expected");
4978     return MatchOperand_ParseFail;
4979   }
4980 
4981   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
4982                                                   0, S, Tok.getEndLoc()));
4983 
4984   return MatchOperand_Success;
4985 }
4986 
4987 /// Convert parsed operands to MCInst.  Needed here because this instruction
4988 /// only has two register operands, but multiplication is commutative so
4989 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
4990 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
4991                                     const OperandVector &Operands) {
4992   ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
4993   ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
4994   // If we have a three-operand form, make sure to set Rn to be the operand
4995   // that isn't the same as Rd.
4996   unsigned RegOp = 4;
4997   if (Operands.size() == 6 &&
4998       ((ARMOperand &)*Operands[4]).getReg() ==
4999           ((ARMOperand &)*Operands[3]).getReg())
5000     RegOp = 5;
5001   ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
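  // The multiply's second source register is tied to the destination, so add
  // Rd again.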
5002   Inst.addOperand(Inst.getOperand(0));
5003   ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
5004 }
5005 
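/// Convert parsed operands for a Thumb branch to an MCInst: pick the
/// conditional or unconditional opcode based on whether the branch lies
/// inside an IT block, then widen tB/tBcc to t2B/t2Bcc when the target
/// offset does not fit the narrow encoding.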
5006 void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
5007                                     const OperandVector &Operands) {
5008   int CondOp = -1, ImmOp = -1;
5009   switch(Inst.getOpcode()) {
5010     case ARM::tB:
5011     case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;
5012 
5013     case ARM::t2B:
5014     case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;
5015 
5016     default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
5017   }
  // First decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block.
5020   if(inITBlock()) {
    // Inside an IT block we cannot have any conditional branches. Any such
    // instruction needs to be converted to an unconditional form.
5023     switch(Inst.getOpcode()) {
5024       case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
5025       case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
5026     }
5027   } else {
5028     // outside IT blocks we can only have unconditional branches with AL
5029     // condition code or conditional branches with non-AL condition code
5030     unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
5031     switch(Inst.getOpcode()) {
5032       case ARM::tB:
5033       case ARM::tBcc:
5034         Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
5035         break;
5036       case ARM::t2B:
5037       case ARM::t2Bcc:
5038         Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
5039         break;
5040     }
5041   }
5042 
5043   // now decide on encoding size based on branch target range
5044   switch(Inst.getOpcode()) {
5045     // classify tB as either t2B or t1B based on range of immediate operand
5046     case ARM::tB: {
5047       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5048       if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
5049         Inst.setOpcode(ARM::t2B);
5050       break;
5051     }
5052     // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
5053     case ARM::tBcc: {
5054       ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
5055       if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
5056         Inst.setOpcode(ARM::t2Bcc);
5057       break;
5058     }
5059   }
5060   ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
5061   ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
5062 }
5063 
/// Parse an ARM memory operand. Returns false on success; otherwise reports
/// an error and returns true. The first token must be a '[' when called.
5066 bool ARMAsmParser::parseMemory(OperandVector &Operands) {
5067   MCAsmParser &Parser = getParser();
5068   SMLoc S, E;
5069   if (Parser.getTok().isNot(AsmToken::LBrac))
5070     return TokError("Token is not a Left Bracket");
5071   S = Parser.getTok().getLoc();
5072   Parser.Lex(); // Eat left bracket token.
5073 
5074   const AsmToken &BaseRegTok = Parser.getTok();
5075   int BaseRegNum = tryParseRegister();
5076   if (BaseRegNum == -1)
5077     return Error(BaseRegTok.getLoc(), "register expected");
5078 
5079   // The next token must either be a comma, a colon or a closing bracket.
5080   const AsmToken &Tok = Parser.getTok();
5081   if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
5082       !Tok.is(AsmToken::RBrac))
5083     return Error(Tok.getLoc(), "malformed memory operand");
5084 
5085   if (Tok.is(AsmToken::RBrac)) {
5086     E = Tok.getEndLoc();
5087     Parser.Lex(); // Eat right bracket token.
5088 
5089     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5090                                              ARM_AM::no_shift, 0, 0, false,
5091                                              S, E));
5092 
5093     // If there's a pre-indexing writeback marker, '!', just add it as a token
5094     // operand. It's rather odd, but syntactically valid.
5095     if (Parser.getTok().is(AsmToken::Exclaim)) {
5096       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5097       Parser.Lex(); // Eat the '!'.
5098     }
5099 
5100     return false;
5101   }
5102 
5103   assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
5104          "Lost colon or comma in memory operand?!");
5105   if (Tok.is(AsmToken::Comma)) {
5106     Parser.Lex(); // Eat the comma.
5107   }
5108 
5109   // If we have a ':', it's an alignment specifier.
5110   if (Parser.getTok().is(AsmToken::Colon)) {
5111     Parser.Lex(); // Eat the ':'.
5112     E = Parser.getTok().getLoc();
5113     SMLoc AlignmentLoc = Tok.getLoc();
5114 
5115     const MCExpr *Expr;
5116     if (getParser().parseExpression(Expr))
5117      return true;
5118 
5119     // The expression has to be a constant. Memory references with relocations
5120     // don't come through here, as they use the <label> forms of the relevant
5121     // instructions.
5122     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5123     if (!CE)
5124       return Error (E, "constant expression expected");
5125 
5126     unsigned Align = 0;
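    // The alignment specifier is given in bits; convert it to bytes.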
5127     switch (CE->getValue()) {
5128     default:
5129       return Error(E,
5130                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
5131     case 16:  Align = 2; break;
5132     case 32:  Align = 4; break;
5133     case 64:  Align = 8; break;
5134     case 128: Align = 16; break;
5135     case 256: Align = 32; break;
5136     }
5137 
5138     // Now we should have the closing ']'
5139     if (Parser.getTok().isNot(AsmToken::RBrac))
5140       return Error(Parser.getTok().getLoc(), "']' expected");
5141     E = Parser.getTok().getEndLoc();
5142     Parser.Lex(); // Eat right bracket token.
5143 
5144     // Don't worry about range checking the value here. That's handled by
5145     // the is*() predicates.
5146     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
5147                                              ARM_AM::no_shift, 0, Align,
5148                                              false, S, E, AlignmentLoc));
5149 
5150     // If there's a pre-indexing writeback marker, '!', just add it as a token
5151     // operand.
5152     if (Parser.getTok().is(AsmToken::Exclaim)) {
5153       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5154       Parser.Lex(); // Eat the '!'.
5155     }
5156 
5157     return false;
5158   }
5159 
5160   // If we have a '#', it's an immediate offset, else assume it's a register
5161   // offset. Be friendly and also accept a plain integer (without a leading
5162   // hash) for gas compatibility.
5163   if (Parser.getTok().is(AsmToken::Hash) ||
5164       Parser.getTok().is(AsmToken::Dollar) ||
5165       Parser.getTok().is(AsmToken::Integer)) {
5166     if (Parser.getTok().isNot(AsmToken::Integer))
5167       Parser.Lex(); // Eat '#' or '$'.
5168     E = Parser.getTok().getLoc();
5169 
5170     bool isNegative = getParser().getTok().is(AsmToken::Minus);
5171     const MCExpr *Offset;
5172     if (getParser().parseExpression(Offset))
5173      return true;
5174 
5175     // The expression has to be a constant. Memory references with relocations
5176     // don't come through here, as they use the <label> forms of the relevant
5177     // instructions.
5178     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
5179     if (!CE)
5180       return Error (E, "constant expression expected");
5181 
5182     // If the constant was #-0, represent it as
5183     // std::numeric_limits<int32_t>::min().
5184     int32_t Val = CE->getValue();
5185     if (isNegative && Val == 0)
5186       CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5187                                   getContext());
5188 
5189     // Now we should have the closing ']'
5190     if (Parser.getTok().isNot(AsmToken::RBrac))
5191       return Error(Parser.getTok().getLoc(), "']' expected");
5192     E = Parser.getTok().getEndLoc();
5193     Parser.Lex(); // Eat right bracket token.
5194 
5195     // Don't worry about range checking the value here. That's handled by
5196     // the is*() predicates.
5197     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
5198                                              ARM_AM::no_shift, 0, 0,
5199                                              false, S, E));
5200 
5201     // If there's a pre-indexing writeback marker, '!', just add it as a token
5202     // operand.
5203     if (Parser.getTok().is(AsmToken::Exclaim)) {
5204       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5205       Parser.Lex(); // Eat the '!'.
5206     }
5207 
5208     return false;
5209   }
5210 
5211   // The register offset is optionally preceded by a '+' or '-'
5212   bool isNegative = false;
5213   if (Parser.getTok().is(AsmToken::Minus)) {
5214     isNegative = true;
5215     Parser.Lex(); // Eat the '-'.
5216   } else if (Parser.getTok().is(AsmToken::Plus)) {
5217     // Nothing to do.
5218     Parser.Lex(); // Eat the '+'.
5219   }
5220 
5221   E = Parser.getTok().getLoc();
5222   int OffsetRegNum = tryParseRegister();
5223   if (OffsetRegNum == -1)
5224     return Error(E, "register expected");
5225 
5226   // If there's a shift operator, handle it.
5227   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
5228   unsigned ShiftImm = 0;
5229   if (Parser.getTok().is(AsmToken::Comma)) {
5230     Parser.Lex(); // Eat the ','.
5231     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
5232       return true;
5233   }
5234 
5235   // Now we should have the closing ']'
5236   if (Parser.getTok().isNot(AsmToken::RBrac))
5237     return Error(Parser.getTok().getLoc(), "']' expected");
5238   E = Parser.getTok().getEndLoc();
5239   Parser.Lex(); // Eat right bracket token.
5240 
5241   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
5242                                            ShiftType, ShiftImm, 0, isNegative,
5243                                            S, E));
5244 
5245   // If there's a pre-indexing writeback marker, '!', just add it as a token
5246   // operand.
5247   if (Parser.getTok().is(AsmToken::Exclaim)) {
5248     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
5249     Parser.Lex(); // Eat the '!'.
5250   }
5251 
5252   return false;
5253 }
5254 
/// parseMemRegOffsetShift - Parse one of these two forms:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns false on success; otherwise reports an error and returns true.
5259 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
5260                                           unsigned &Amount) {
5261   MCAsmParser &Parser = getParser();
5262   SMLoc Loc = Parser.getTok().getLoc();
5263   const AsmToken &Tok = Parser.getTok();
5264   if (Tok.isNot(AsmToken::Identifier))
5265     return Error(Loc, "illegal shift operator");
5266   StringRef ShiftName = Tok.getString();
5267   if (ShiftName == "lsl" || ShiftName == "LSL" ||
5268       ShiftName == "asl" || ShiftName == "ASL")
5269     St = ARM_AM::lsl;
5270   else if (ShiftName == "lsr" || ShiftName == "LSR")
5271     St = ARM_AM::lsr;
5272   else if (ShiftName == "asr" || ShiftName == "ASR")
5273     St = ARM_AM::asr;
5274   else if (ShiftName == "ror" || ShiftName == "ROR")
5275     St = ARM_AM::ror;
5276   else if (ShiftName == "rrx" || ShiftName == "RRX")
5277     St = ARM_AM::rrx;
5278   else
5279     return Error(Loc, "illegal shift operator");
5280   Parser.Lex(); // Eat shift type token.
5281 
5282   // rrx stands alone.
5283   Amount = 0;
5284   if (St != ARM_AM::rrx) {
5285     Loc = Parser.getTok().getLoc();
5286     // A '#' and a shift amount.
5287     const AsmToken &HashTok = Parser.getTok();
5288     if (HashTok.isNot(AsmToken::Hash) &&
5289         HashTok.isNot(AsmToken::Dollar))
5290       return Error(HashTok.getLoc(), "'#' expected");
5291     Parser.Lex(); // Eat hash token.
5292 
5293     const MCExpr *Expr;
5294     if (getParser().parseExpression(Expr))
5295       return true;
5296     // Range check the immediate.
5297     // lsl, ror: 0 <= imm <= 31
5298     // lsr, asr: 0 <= imm <= 32
5299     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
5300     if (!CE)
5301       return Error(Loc, "shift amount must be an immediate");
5302     int64_t Imm = CE->getValue();
5303     if (Imm < 0 ||
5304         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
5305         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
5306       return Error(Loc, "immediate shift value out of range");
5307     // If <ShiftTy> #0, turn it into a no_shift.
5308     if (Imm == 0)
5309       St = ARM_AM::lsl;
5310     // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
5311     if (Imm == 32)
5312       Imm = 0;
5313     Amount = Imm;
5314   }
5315 
5316   return false;
5317 }
5318 
5319 /// parseFPImm - A floating point immediate expression operand.
5320 OperandMatchResultTy
5321 ARMAsmParser::parseFPImm(OperandVector &Operands) {
5322   MCAsmParser &Parser = getParser();
5323   // Anything that can accept a floating point constant as an operand
5324   // needs to go through here, as the regular parseExpression is
5325   // integer only.
5326   //
5327   // This routine still creates a generic Immediate operand, containing
5328   // a bitcast of the 64-bit floating point value. The various operands
5329   // that accept floats can check whether the value is valid for them
5330   // via the standard is*() predicates.
5331 
5332   SMLoc S = Parser.getTok().getLoc();
5333 
5334   if (Parser.getTok().isNot(AsmToken::Hash) &&
5335       Parser.getTok().isNot(AsmToken::Dollar))
5336     return MatchOperand_NoMatch;
5337 
5338   // Disambiguate the VMOV forms that can accept an FP immediate.
5339   // vmov.f32 <sreg>, #imm
5340   // vmov.f64 <dreg>, #imm
5341   // vmov.f32 <dreg>, #imm  @ vector f32x2
5342   // vmov.f32 <qreg>, #imm  @ vector f32x4
5343   //
5344   // There are also the NEON VMOV instructions which expect an
5345   // integer constant. Make sure we don't try to parse an FPImm
5346   // for these:
5347   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
5348   ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
5349   bool isVmovf = TyOp.isToken() &&
5350                  (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
5351                   TyOp.getToken() == ".f16");
5352   ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
5353   bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
5354                                          Mnemonic.getToken() == "fconsts");
5355   if (!(isVmovf || isFconst))
5356     return MatchOperand_NoMatch;
5357 
5358   Parser.Lex(); // Eat '#' or '$'.
5359 
5360   // Handle negation, as that still comes through as a separate token.
5361   bool isNegative = false;
5362   if (Parser.getTok().is(AsmToken::Minus)) {
5363     isNegative = true;
5364     Parser.Lex();
5365   }
5366   const AsmToken &Tok = Parser.getTok();
5367   SMLoc Loc = Tok.getLoc();
5368   if (Tok.is(AsmToken::Real) && isVmovf) {
5369     APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
5370     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5371     // If we had a '-' in front, toggle the sign bit.
5372     IntVal ^= (uint64_t)isNegative << 31;
5373     Parser.Lex(); // Eat the token.
5374     Operands.push_back(ARMOperand::CreateImm(
5375           MCConstantExpr::create(IntVal, getContext()),
5376           S, Parser.getTok().getLoc()));
5377     return MatchOperand_Success;
5378   }
5379   // Also handle plain integers. Instructions which allow floating point
5380   // immediates also allow a raw encoded 8-bit value.
5381   if (Tok.is(AsmToken::Integer) && isFconst) {
5382     int64_t Val = Tok.getIntVal();
5383     Parser.Lex(); // Eat the token.
5384     if (Val > 255 || Val < 0) {
5385       Error(Loc, "encoded floating point value out of range");
5386       return MatchOperand_ParseFail;
5387     }
5388     float RealVal = ARM_AM::getFPImmFloat(Val);
5389     Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();
5390 
5391     Operands.push_back(ARMOperand::CreateImm(
5392         MCConstantExpr::create(Val, getContext()), S,
5393         Parser.getTok().getLoc()));
5394     return MatchOperand_Success;
5395   }
5396 
5397   Error(Loc, "invalid floating point immediate");
5398   return MatchOperand_ParseFail;
5399 }
5400 
/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic.
5403 bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
5404   MCAsmParser &Parser = getParser();
5405   SMLoc S, E;
5406 
5407   // Check if the current operand has a custom associated parser, if so, try to
5408   // custom parse the operand, or fallback to the general approach.
5409   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
5410   if (ResTy == MatchOperand_Success)
5411     return false;
5412   // If there wasn't a custom match, try the generic matcher below. Otherwise,
5413   // there was a match, but an error occurred, in which case, just return that
5414   // the operand parsing failed.
5415   if (ResTy == MatchOperand_ParseFail)
5416     return true;
5417 
5418   switch (getLexer().getKind()) {
5419   default:
5420     Error(Parser.getTok().getLoc(), "unexpected token in operand");
5421     return true;
5422   case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "b r1" means branch to
    // label "r1".
5426     bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
5427     if (!ExpectLabel) {
5428       if (!tryParseRegisterWithWriteBack(Operands))
5429         return false;
5430       int Res = tryParseShiftRegister(Operands);
5431       if (Res == 0) // success
5432         return false;
5433       else if (Res == -1) // irrecoverable error
5434         return true;
5435       // If this is VMRS, check for the apsr_nzcv operand.
5436       if (Mnemonic == "vmrs" &&
5437           Parser.getTok().getString().equals_lower("apsr_nzcv")) {
5438         S = Parser.getTok().getLoc();
5439         Parser.Lex();
5440         Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
5441         return false;
5442       }
5443     }
5444 
    // Fall through for the Identifier case that is not a register or a
    // special name.
5447     LLVM_FALLTHROUGH;
5448   }
5449   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as branch targets
5451   case AsmToken::String:  // quoted label names.
5452   case AsmToken::Dot: {   // . as a branch target
5453     // This was not a register so parse other operands that start with an
5454     // identifier (like labels) as expressions and create them as immediates.
5455     const MCExpr *IdVal;
5456     S = Parser.getTok().getLoc();
5457     if (getParser().parseExpression(IdVal))
5458       return true;
5459     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5460     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
5461     return false;
5462   }
5463   case AsmToken::LBrac:
5464     return parseMemory(Operands);
5465   case AsmToken::LCurly:
5466     return parseRegisterList(Operands);
5467   case AsmToken::Dollar:
5468   case AsmToken::Hash:
5469     // #42 -> immediate.
5470     S = Parser.getTok().getLoc();
5471     Parser.Lex();
5472 
5473     if (Parser.getTok().isNot(AsmToken::Colon)) {
5474       bool isNegative = Parser.getTok().is(AsmToken::Minus);
5475       const MCExpr *ImmVal;
5476       if (getParser().parseExpression(ImmVal))
5477         return true;
5478       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
5479       if (CE) {
5480         int32_t Val = CE->getValue();
5481         if (isNegative && Val == 0)
5482           ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
5483                                           getContext());
5484       }
5485       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5486       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
5487 
5488       // There can be a trailing '!' on operands that we want as a separate
5489       // '!' Token operand. Handle that here. For example, the compatibility
5490       // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
5491       if (Parser.getTok().is(AsmToken::Exclaim)) {
5492         Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
5493                                                    Parser.getTok().getLoc()));
5494         Parser.Lex(); // Eat exclaim token
5495       }
5496       return false;
5497     }
    // With a ':' after the '#', it's just like a plain ':'.
5499     LLVM_FALLTHROUGH;
5500 
5501   case AsmToken::Colon: {
5502     S = Parser.getTok().getLoc();
5503     // ":lower16:" and ":upper16:" expression prefixes
5504     // FIXME: Check it's an expression prefix,
5505     // e.g. (FOO - :lower16:BAR) isn't legal.
5506     ARMMCExpr::VariantKind RefKind;
5507     if (parsePrefix(RefKind))
5508       return true;
5509 
5510     const MCExpr *SubExprVal;
5511     if (getParser().parseExpression(SubExprVal))
5512       return true;
5513 
5514     const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
5515                                               getContext());
5516     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5517     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
5518     return false;
5519   }
5520   case AsmToken::Equal: {
5521     S = Parser.getTok().getLoc();
5522     if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5523       return Error(S, "unexpected token in operand");
5524     Parser.Lex(); // Eat '='
5525     const MCExpr *SubExprVal;
5526     if (getParser().parseExpression(SubExprVal))
5527       return true;
5528     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
5529 
5530     // execute-only: we assume that assembly programmers know what they are
5531     // doing and allow literal pool creation here
5532     Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
5533     return false;
5534   }
5535   }
5536 }
5537 
5538 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
5539 //  :lower16: and :upper16:.
5540 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5541   MCAsmParser &Parser = getParser();
5542   RefKind = ARMMCExpr::VK_ARM_None;
5543 
5544   // consume an optional '#' (GNU compatibility)
5545   if (getLexer().is(AsmToken::Hash))
5546     Parser.Lex();
5547 
5548   // :lower16: and :upper16: modifiers
5549   assert(getLexer().is(AsmToken::Colon) && "expected a :");
5550   Parser.Lex(); // Eat ':'
5551 
5552   if (getLexer().isNot(AsmToken::Identifier)) {
5553     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5554     return true;
5555   }
5556 
5557   enum {
5558     COFF = (1 << MCObjectFileInfo::IsCOFF),
5559     ELF = (1 << MCObjectFileInfo::IsELF),
5560     MACHO = (1 << MCObjectFileInfo::IsMachO),
5561     WASM = (1 << MCObjectFileInfo::IsWasm),
5562   };
5563   static const struct PrefixEntry {
5564     const char *Spelling;
5565     ARMMCExpr::VariantKind VariantKind;
5566     uint8_t SupportedFormats;
5567   } PrefixEntries[] = {
5568     { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5569     { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5570   };
5571 
5572   StringRef IDVal = Parser.getTok().getIdentifier();
5573 
5574   const auto &Prefix =
5575       std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5576                    [&IDVal](const PrefixEntry &PE) {
5577                       return PE.Spelling == IDVal;
5578                    });
5579   if (Prefix == std::end(PrefixEntries)) {
5580     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5581     return true;
5582   }
5583 
5584   uint8_t CurrentFormat;
5585   switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5586   case MCObjectFileInfo::IsMachO:
5587     CurrentFormat = MACHO;
5588     break;
5589   case MCObjectFileInfo::IsELF:
5590     CurrentFormat = ELF;
5591     break;
5592   case MCObjectFileInfo::IsCOFF:
5593     CurrentFormat = COFF;
5594     break;
5595   case MCObjectFileInfo::IsWasm:
5596     CurrentFormat = WASM;
5597     break;
5598   case MCObjectFileInfo::IsXCOFF:
5599     llvm_unreachable("unexpected object format");
5600     break;
5601   }
5602 
5603   if (~Prefix->SupportedFormats & CurrentFormat) {
5604     Error(Parser.getTok().getLoc(),
5605           "cannot represent relocation in the current file format");
5606     return true;
5607   }
5608 
5609   RefKind = Prefix->VariantKind;
5610   Parser.Lex();
5611 
5612   if (getLexer().isNot(AsmToken::Colon)) {
5613     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5614     return true;
5615   }
5616   Parser.Lex(); // Eat the last ':'
5617 
5618   return false;
5619 }
5620 
5621 /// Given a mnemonic, split out possible predication code and carry
5622 /// setting letters to form a canonical mnemonic and flags.
5623 //
5624 // FIXME: Would be nice to autogen this.
5625 // FIXME: This is a bit of a maze of special cases.
5626 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5627                                       unsigned &PredicationCode,
5628                                       bool &CarrySetting,
5629                                       unsigned &ProcessorIMod,
5630                                       StringRef &ITMask) {
5631   PredicationCode = ARMCC::AL;
5632   CarrySetting = false;
5633   ProcessorIMod = 0;
5634 
5635   // Ignore some mnemonics we know aren't predicated forms.
5636   //
5637   // FIXME: Would be nice to autogen this.
5638   if ((Mnemonic == "movs" && isThumb()) ||
5639       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
5640       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
5641       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
5642       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
5643       Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
5644       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
5645       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
5646       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5647       Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5648       Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
5649       Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5650       Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5651       Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5652       Mnemonic == "bxns"  || Mnemonic == "blxns" ||
5653       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5654       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
5655       Mnemonic == "vfmal" || Mnemonic == "vfmsl")
5656     return Mnemonic;
5657 
5658   // First, split out any predication code. Ignore mnemonics we know aren't
5659   // predicated but do have a carry-set and so weren't caught above.
5660   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5661       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5662       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5663       Mnemonic != "sbcs" && Mnemonic != "rscs") {
5664     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
5665     if (CC != ~0U) {
5666       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5667       PredicationCode = CC;
5668     }
5669   }
5670 
5671   // Next, determine if we have a carry setting bit. We explicitly ignore all
5672   // the instructions we know end in 's'.
5673   if (Mnemonic.endswith("s") &&
5674       !(Mnemonic == "cps" || Mnemonic == "mls" ||
5675         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5676         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5677         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5678         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5679         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5680         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5681         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5682         Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5683         Mnemonic == "bxns" || Mnemonic == "blxns" ||
5684         (Mnemonic == "movs" && isThumb()))) {
5685     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5686     CarrySetting = true;
5687   }
5688 
  // The "cps" instruction can have an interrupt mode operand which is glued
  // into the mnemonic. Check if this is the case, split it off, and parse the
  // imod operand.
5691   if (Mnemonic.startswith("cps")) {
5692     // Split out any imod code.
5693     unsigned IMod =
5694       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5695       .Case("ie", ARM_PROC::IE)
5696       .Case("id", ARM_PROC::ID)
5697       .Default(~0U);
5698     if (IMod != ~0U) {
5699       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5700       ProcessorIMod = IMod;
5701     }
5702   }
5703 
5704   // The "it" instruction has the condition mask on the end of the mnemonic.
5705   if (Mnemonic.startswith("it")) {
5706     ITMask = Mnemonic.slice(2, Mnemonic.size());
5707     Mnemonic = Mnemonic.slice(0, 2);
5708   }
5709 
5710   return Mnemonic;
5711 }
5712 
5713 /// Given a canonical mnemonic, determine if the instruction ever allows
5714 /// inclusion of carry set or predication code operands.
5715 //
5716 // FIXME: It would be nice to autogen this.
5717 void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
5718                                          bool &CanAcceptCarrySet,
5719                                          bool &CanAcceptPredicationCode) {
5720   CanAcceptCarrySet =
5721       Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5722       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
5723       Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
5724       Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
5725       Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
5726       Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
5727       Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
5728       (!isThumb() &&
5729        (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
5730         Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));
5731 
5732   if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
5733       Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
5734       Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
5735       Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
5736       Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
5737       Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
5738       Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
5739       Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
5740       Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
5741       Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
5742       (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
5743       Mnemonic == "vmovx" || Mnemonic == "vins" ||
5744       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5745       Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
5746       Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
5747       Mnemonic == "sb"    || Mnemonic == "ssbb"  ||
5748       Mnemonic == "pssbb") {
5749     // These mnemonics are never predicable
5750     CanAcceptPredicationCode = false;
5751   } else if (!isThumb()) {
5752     // Some instructions are only predicable in Thumb mode
5753     CanAcceptPredicationCode =
5754         Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
5755         Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
5756         Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
5757         Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
5758         Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
5759         Mnemonic != "stc2" && Mnemonic != "stc2l" &&
5760         Mnemonic != "tsb" &&
5761         !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
5762   } else if (isThumbOne()) {
5763     if (hasV6MOps())
5764       CanAcceptPredicationCode = Mnemonic != "movs";
5765     else
5766       CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
5767   } else
5768     CanAcceptPredicationCode = true;
5769 }
5770 
// Some Thumb instructions have two-operand forms that are not available as
// three-operand forms; convert to the two-operand form where possible.
5773 //
5774 // FIXME: We would really like to be able to tablegen'erate this.
5775 void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
5776                                                  bool CarrySetting,
5777                                                  OperandVector &Operands) {
5778   if (Operands.size() != 6)
5779     return;
5780 
5781   const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5782         auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
5783   if (!Op3.isReg() || !Op4.isReg())
5784     return;
5785 
5786   auto Op3Reg = Op3.getReg();
5787   auto Op4Reg = Op4.getReg();
5788 
5789   // For most Thumb2 cases we just generate the 3 operand form and reduce
5790   // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
5791   // won't accept SP or PC so we do the transformation here taking care
5792   // with immediate range in the 'add sp, sp #imm' case.
5793   auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
5794   if (isThumbTwo()) {
5795     if (Mnemonic != "add")
5796       return;
5797     bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
5798                         (Op5.isReg() && Op5.getReg() == ARM::PC);
5799     if (!TryTransform) {
5800       TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
5801                       (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
5802                      !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
5803                        Op5.isImm() && !Op5.isImm0_508s4());
5804     }
5805     if (!TryTransform)
5806       return;
5807   } else if (!isThumbOne())
5808     return;
5809 
5810   if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
5811         Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
5812         Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
5813         Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
5814     return;
5815 
  // If the first two operands of a three-operand instruction are the same,
  // transform to the two-operand version of the same instruction, e.g.
  // 'ands r0, r0, r1' transforms to 'ands r0, r1' (subject to the exceptions
  // handled below).
5819   bool Transform = Op3Reg == Op4Reg;
5820 
  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
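  // For example, 'add r0, r1, r0' can be swapped to 'add r0, r0, r1' and then
  // reduced to the two-operand form 'add r0, r1'.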
5824   const ARMOperand *LastOp = &Op5;
5825   bool Swap = false;
5826   if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
5827       ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
5828        Mnemonic == "and" || Mnemonic == "eor" ||
5829        Mnemonic == "adc" || Mnemonic == "orr")) {
5830     Swap = true;
5831     LastOp = &Op4;
5832     Transform = true;
5833   }
5834 
5835   // If both registers are the same then remove one of them from
5836   // the operand list, with certain exceptions.
5837   if (Transform) {
5838     // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
5839     // 2 operand forms don't exist.
5840     if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
5841         LastOp->isReg())
5842       Transform = false;
5843 
5844     // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
5845     // 3-bits because the ARMARM says not to.
5846     if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
5847       Transform = false;
5848   }
5849 
5850   if (Transform) {
5851     if (Swap)
5852       std::swap(Op4, Op5);
5853     Operands.erase(Operands.begin() + 3);
5854   }
5855 }
5856 
5857 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
5858                                           OperandVector &Operands) {
5859   // FIXME: This is all horribly hacky. We really need a better way to deal
5860   // with optional operands like this in the matcher table.
5861 
5862   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
5863   // another does not. Specifically, the MOVW instruction does not. So we
5864   // special case it here and remove the defaulted (non-setting) cc_out
5865   // operand if that's the instruction we're trying to match.
5866   //
5867   // We do this as post-processing of the explicit operands rather than just
5868   // conditionally adding the cc_out in the first place because we need
5869   // to check the type of the parsed immediate operand.
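  //
  // For example, in ARM mode 'mov r0, #0xabcd' can only be encoded as MOVW
  // (the value has no modified-immediate encoding), so the defaulted cc_out
  // operand is dropped.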
5870   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
5871       !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
5872       static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
5873       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5874     return true;
5875 
5876   // Register-register 'add' for thumb does not have a cc_out operand
5877   // when there are only two register operands.
5878   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
5879       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5880       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5881       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
5882     return true;
5883   // Register-register 'add' for thumb does not have a cc_out operand
5884   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
5885   // have to check the immediate range here since Thumb2 has a variant
5886   // that can handle a different range and has a cc_out operand.
5887   if (((isThumb() && Mnemonic == "add") ||
5888        (isThumbTwo() && Mnemonic == "sub")) &&
5889       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5890       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5891       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
5892       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5893       ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
5894        static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
5895     return true;
5896   // For Thumb2, add/sub immediate does not have a cc_out operand for the
5897   // imm0_4095 variant. That's the least-preferred variant when
5898   // selecting via the generic "add" mnemonic, so to know that we
5899   // should remove the cc_out operand, we have to explicitly check that
5900   // it's not one of the other variants. Ugh.
5901   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
5902       Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5903       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5904       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
5905     // Nest conditions rather than one big 'if' statement for readability.
5906     //
5907     // If both registers are low, we're in an IT block, and the immediate is
5908     // in range, we should use encoding T1 instead, which has a cc_out.
5909     if (inITBlock() &&
5910         isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
5911         isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
5912         static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
5913       return false;
5914     // Check against T3. If the second register is the PC, this is an
5915     // alternate form of ADR, which uses encoding T4, so check for that too.
5916     if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
5917         static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
5918       return false;
5919 
5920     // Otherwise, we use encoding T4, which does not have a cc_out
5921     // operand.
5922     return true;
5923   }
5924 
5925   // The thumb2 multiply instruction doesn't have a CCOut register, so
5926   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
5927   // use the 16-bit encoding or not.
5928   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
5929       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5930       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5931       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5932       static_cast<ARMOperand &>(*Operands[5]).isReg() &&
5933       // If the registers aren't low regs, the destination reg isn't the
5934       // same as one of the source regs, or the cc_out operand is zero
5935       // outside of an IT block, we have to use the 32-bit encoding, so
5936       // remove the cc_out operand.
5937       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5938        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5939        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
5940        !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5941                             static_cast<ARMOperand &>(*Operands[5]).getReg() &&
5942                         static_cast<ARMOperand &>(*Operands[3]).getReg() !=
5943                             static_cast<ARMOperand &>(*Operands[4]).getReg())))
5944     return true;
5945 
5946   // Also check the 'mul' syntax variant that doesn't specify an explicit
5947   // destination register.
5948   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
5949       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5950       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5951       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
5952       // If the registers aren't low regs  or the cc_out operand is zero
5953       // outside of an IT block, we have to use the 32-bit encoding, so
5954       // remove the cc_out operand.
5955       (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
5956        !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
5957        !inITBlock()))
5958     return true;
5959 
5960   // Register-register 'add/sub' for thumb does not have a cc_out operand
5961   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
5962   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
5963   // right, this will result in better diagnostics (which operand is off)
5964   // anyway.
5965   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
5966       (Operands.size() == 5 || Operands.size() == 6) &&
5967       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
5968       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
5969       static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
5970       (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
5971        (Operands.size() == 6 &&
5972         static_cast<ARMOperand &>(*Operands[5]).isImm())))
5973     return true;
5974 
5975   return false;
5976 }
5977 
5978 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5979                                               OperandVector &Operands) {
5980   // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
5981   unsigned RegIdx = 3;
5982   if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
5983       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5984        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5985     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5986         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5987          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5988       RegIdx = 4;
5989 
5990     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5991         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5992              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5993          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5994              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5995       return true;
5996   }
5997   return false;
5998 }
5999 
6000 static bool isDataTypeToken(StringRef Tok) {
6001   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
6002     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
6003     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
6004     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
6005     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
6006     Tok == ".f" || Tok == ".d";
6007 }
6008 
6009 // FIXME: This bit should probably be handled via an explicit match class
6010 // in the .td files that matches the suffix instead of having it be
6011 // a literal string token the way it is now.
6012 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
6013   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
6014 }
6015 
6016 static void applyMnemonicAliases(StringRef &Mnemonic,
6017                                  const FeatureBitset &Features,
6018                                  unsigned VariantID);
6019 
6020 // The GNU assembler has aliases of ldrd and strd with the second register
6021 // omitted. We don't have a way to do that in tablegen, so fix it up here.
6022 //
6023 // We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
6025 // it. If we do find anything that prevents us from doing the transformation we
6026 // bail out, and let the assembly parser report an error on the instruction as
6027 // it is written.
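//
// For example, 'ldrd r0, [r2]' is treated as 'ldrd r0, r1, [r2]'.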
6028 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
6029                                      OperandVector &Operands) {
6030   if (Mnemonic != "ldrd" && Mnemonic != "strd")
6031     return;
6032   if (Operands.size() < 4)
6033     return;
6034 
6035   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
6036   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
6037 
6038   if (!Op2.isReg())
6039     return;
6040   if (!Op3.isMem())
6041     return;
6042 
6043   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
6044   if (!GPR.contains(Op2.getReg()))
6045     return;
6046 
6047   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
6048   if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must form an aligned (even/odd) pair; this
    // restriction does not apply in Thumb mode.
6051     return;
6052   }
6053   if (Op2.getReg() == ARM::PC)
6054     return;
6055   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
6056   if (!PairedReg || PairedReg == ARM::PC ||
6057       (PairedReg == ARM::SP && !hasV8Ops()))
6058     return;
6059 
6060   Operands.insert(
6061       Operands.begin() + 3,
6062       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
6063 }
6064 
6065 /// Parse an arm instruction mnemonic followed by its operands.
6066 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
6067                                     SMLoc NameLoc, OperandVector &Operands) {
6068   MCAsmParser &Parser = getParser();
6069 
6070   // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
6072   // The generic tblgen'erated code does this later, at the start of
6073   // MatchInstructionImpl(), but that's too late for aliases that include
6074   // any sort of suffix.
6075   const FeatureBitset &AvailableFeatures = getAvailableFeatures();
6076   unsigned AssemblerDialect = getParser().getAssemblerDialect();
6077   applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);
6078 
6079   // First check for the ARM-specific .req directive.
6080   if (Parser.getTok().is(AsmToken::Identifier) &&
6081       Parser.getTok().getIdentifier() == ".req") {
6082     parseDirectiveReq(Name, NameLoc);
6083     // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
6085     return true;
6086   }
6087 
6088   // Create the leading tokens for the mnemonic, split by '.' characters.
6089   size_t Start = 0, Next = Name.find('.');
6090   StringRef Mnemonic = Name.slice(Start, Next);
6091 
6092   // Split out the predication code and carry setting flag from the mnemonic.
6093   unsigned PredicationCode;
6094   unsigned ProcessorIMod;
6095   bool CarrySetting;
6096   StringRef ITMask;
6097   Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
6098                            ProcessorIMod, ITMask);
6099 
6100   // In Thumb1, only the branch (B) instruction can be predicated.
6101   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
6102     return Error(NameLoc, "conditional execution not supported in Thumb1");
6103   }
6104 
6105   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
6106 
6107   // Handle the IT instruction ITMask. Convert it to a bitmask. This
6108   // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
6110   // where the conditional bit0 is zero, the instruction post-processing
6111   // will adjust the mask accordingly.
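  //
  // For example, "itte" has ITMask "te"; the loop below computes Mask = 0b1010
  // (again assuming the condition's bit0 is 1).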
6112   if (Mnemonic == "it") {
6113     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
6114     if (ITMask.size() > 3) {
6115       return Error(Loc, "too many conditions on IT instruction");
6116     }
6117     unsigned Mask = 8;
6118     for (unsigned i = ITMask.size(); i != 0; --i) {
6119       char pos = ITMask[i - 1];
6120       if (pos != 't' && pos != 'e') {
6121         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
6122       }
6123       Mask >>= 1;
6124       if (ITMask[i - 1] == 't')
6125         Mask |= 8;
6126     }
6127     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
6128   }
6129 
6130   // FIXME: This is all a pretty gross hack. We should automatically handle
6131   // optional operands like this via tblgen.
6132 
6133   // Next, add the CCOut and ConditionCode operands, if needed.
6134   //
6135   // For mnemonics which can ever incorporate a carry setting bit or predication
6136   // code, our matching model involves us always generating CCOut and
6137   // ConditionCode operands to match the mnemonic "as written" and then we let
6138   // the matcher deal with finding the right instruction or generating an
6139   // appropriate error.
6140   bool CanAcceptCarrySet, CanAcceptPredicationCode;
6141   getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);
6142 
6143   // If we had a carry-set on an instruction that can't do that, issue an
6144   // error.
6145   if (!CanAcceptCarrySet && CarrySetting) {
6146     return Error(NameLoc, "instruction '" + Mnemonic +
6147                  "' can not set flags, but 's' suffix specified");
6148   }
6149   // If we had a predication code on an instruction that can't do that, issue an
6150   // error.
6151   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
6152     return Error(NameLoc, "instruction '" + Mnemonic +
6153                  "' is not predicable, but condition code specified");
6154   }
6155 
6156   // Add the carry setting operand, if necessary.
6157   if (CanAcceptCarrySet) {
6158     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
6159     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
6160                                                Loc));
6161   }
6162 
6163   // Add the predication code operand, if necessary.
6164   if (CanAcceptPredicationCode) {
6165     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
6166                                       CarrySetting);
6167     Operands.push_back(ARMOperand::CreateCondCode(
6168                          ARMCC::CondCodes(PredicationCode), Loc));
6169   }
6170 
6171   // Add the processor imod operand, if necessary.
6172   if (ProcessorIMod) {
6173     Operands.push_back(ARMOperand::CreateImm(
6174           MCConstantExpr::create(ProcessorIMod, getContext()),
6175                                  NameLoc, NameLoc));
6176   } else if (Mnemonic == "cps" && isMClass()) {
6177     return Error(NameLoc, "instruction 'cps' requires effect for M-class");
6178   }
6179 
6180   // Add the remaining tokens in the mnemonic.
6181   while (Next != StringRef::npos) {
6182     Start = Next;
6183     Next = Name.find('.', Start + 1);
6184     StringRef ExtraToken = Name.slice(Start, Next);
6185 
6186     // Some NEON instructions have an optional datatype suffix that is
6187     // completely ignored. Check for that.
6188     if (isDataTypeToken(ExtraToken) &&
6189         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
6190       continue;
6191 
    // For ARM mode, generate an error if the .n qualifier is used.
6193     if (ExtraToken == ".n" && !isThumb()) {
6194       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6195       return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
6196                    "arm mode");
6197     }
6198 
6199     // The .n qualifier is always discarded as that is what the tables
6200     // and matcher expect.  In ARM mode the .w qualifier has no effect,
6201     // so discard it to avoid errors that can be caused by the matcher.
6202     if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
6203       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
6204       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
6205     }
6206   }
6207 
6208   // Read the remaining operands.
6209   if (getLexer().isNot(AsmToken::EndOfStatement)) {
6210     // Read the first operand.
6211     if (parseOperand(Operands, Mnemonic)) {
6212       return true;
6213     }
6214 
6215     while (parseOptionalToken(AsmToken::Comma)) {
6216       // Parse and remember the operand.
6217       if (parseOperand(Operands, Mnemonic)) {
6218         return true;
6219       }
6220     }
6221   }
6222 
6223   if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
6224     return true;
6225 
6226   tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);
6227 
6228   // Some instructions, mostly Thumb, have forms for the same mnemonic that
6229   // do and don't have a cc_out optional-def operand. With some spot-checks
6230   // of the operand list, we can figure out which variant we're trying to
6231   // parse and adjust accordingly before actually matching. We shouldn't ever
6232   // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason #317 why the
  // table-driven matcher doesn't fit well with the ARM instruction set.
6235   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
6236     Operands.erase(Operands.begin() + 1);
6237 
6238   // Some instructions have the same mnemonic, but don't always
6239   // have a predicate. Distinguish them here and delete the
6240   // predicate if needed.
6241   if (PredicationCode == ARMCC::AL &&
6242       shouldOmitPredicateOperand(Mnemonic, Operands))
6243     Operands.erase(Operands.begin() + 1);
6244 
6245   // ARM mode 'blx' need special handling, as the register operand version
6246   // is predicable, but the label operand version is not. So, we can't rely
6247   // on the Mnemonic based checking to correctly figure out when to put
6248   // a k_CondCode operand in the list. If we're trying to match the label
6249   // version, remove the k_CondCode operand here.
6250   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
6251       static_cast<ARMOperand &>(*Operands[2]).isImm())
6252     Operands.erase(Operands.begin() + 1);
6253 
6254   // Adjust operands of ldrexd/strexd to MCK_GPRPair.
6255   // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
6256   // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
6258   // expressed as a GPRPair, so we have to manually merge them.
6259   // FIXME: We would really like to be able to tablegen'erate this.
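  //
  // For example, for 'ldrexd r0, r1, [r2]' the r0 and r1 operands are replaced
  // by a single operand for the GPRPair super-register covering r0 and r1.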
6260   if (!isThumb() && Operands.size() > 4 &&
6261       (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
6262        Mnemonic == "stlexd")) {
6263     bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
6264     unsigned Idx = isLoad ? 2 : 3;
6265     ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
6266     ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);
6267 
6268     const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
6269     // Adjust only if Op1 and Op2 are GPRs.
6270     if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
6271         MRC.contains(Op2.getReg())) {
6272       unsigned Reg1 = Op1.getReg();
6273       unsigned Reg2 = Op2.getReg();
6274       unsigned Rt = MRI->getEncodingValue(Reg1);
6275       unsigned Rt2 = MRI->getEncodingValue(Reg2);
6276 
6277       // Rt2 must be Rt + 1 and Rt must be even.
6278       if (Rt + 1 != Rt2 || (Rt & 1)) {
6279         return Error(Op2.getStartLoc(),
6280                      isLoad ? "destination operands must be sequential"
6281                             : "source operands must be sequential");
6282       }
6283       unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
6284           &(MRI->getRegClass(ARM::GPRPairRegClassID)));
6285       Operands[Idx] =
6286           ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
6287       Operands.erase(Operands.begin() + Idx + 1);
6288     }
6289   }
6290 
6291   // GNU Assembler extension (compatibility).
6292   fixupGNULDRDAlias(Mnemonic, Operands);
6293 
  // FIXME: As noted above, this is all a pretty gross hack.  This instruction
  // does not fit well with the other "subs" forms in tblgen.
6296   // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
6297   // so the Mnemonic is the original name "subs" and delete the predicate
6298   // operand so it will match the table entry.
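  // For example, this applies to 'subs pc, lr, #4', which is commonly used to
  // return from an exception.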
6299   if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
6300       static_cast<ARMOperand &>(*Operands[3]).isReg() &&
6301       static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
6302       static_cast<ARMOperand &>(*Operands[4]).isReg() &&
6303       static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
6304       static_cast<ARMOperand &>(*Operands[5]).isImm()) {
6305     Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
6306     Operands.erase(Operands.begin() + 1);
6307   }
6308   return false;
6309 }
6310 
6311 // Validate context-sensitive operand constraints.
6312 
// Return 'true' if the register list contains non-low GPR registers (HiReg,
// if non-zero, is additionally allowed), 'false' otherwise. If Reg is in the
// register list, set 'containsReg' to true.
6316 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6317                                  unsigned Reg, unsigned HiReg,
6318                                  bool &containsReg) {
6319   containsReg = false;
6320   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6321     unsigned OpReg = Inst.getOperand(i).getReg();
6322     if (OpReg == Reg)
6323       containsReg = true;
6324     // Anything other than a low register isn't legal here.
6325     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6326       return true;
6327   }
6328   return false;
6329 }
6330 
// Check if the specified register is in the register list of the inst,
6332 // starting at the indicated operand number.
6333 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6334   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6335     unsigned OpReg = Inst.getOperand(i).getReg();
6336     if (OpReg == Reg)
6337       return true;
6338   }
6339   return false;
6340 }
6341 
6342 // Return true if instruction has the interesting property of being
6343 // allowed in IT blocks, but not being predicable.
6344 static bool instIsBreakpoint(const MCInst &Inst) {
6345     return Inst.getOpcode() == ARM::tBKPT ||
6346            Inst.getOpcode() == ARM::BKPT ||
6347            Inst.getOpcode() == ARM::tHLT ||
6348            Inst.getOpcode() == ARM::HLT;
6349 }
6350 
6351 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6352                                        const OperandVector &Operands,
6353                                        unsigned ListNo, bool IsARPop) {
6354   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6355   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6356 
6357   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6358   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6359   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6360 
6361   if (!IsARPop && ListContainsSP)
6362     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6363                  "SP may not be in the register list");
6364   else if (ListContainsPC && ListContainsLR)
6365     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6366                  "PC and LR may not be in the register list simultaneously");
6367   return false;
6368 }
6369 
6370 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6371                                        const OperandVector &Operands,
6372                                        unsigned ListNo) {
6373   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6374   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6375 
6376   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6377   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6378 
6379   if (ListContainsSP && ListContainsPC)
6380     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6381                  "SP and PC may not be in the register list");
6382   else if (ListContainsSP)
6383     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6384                  "SP may not be in the register list");
6385   else if (ListContainsPC)
6386     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6387                  "PC may not be in the register list");
6388   return false;
6389 }
6390 
6391 bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
6392                                     const OperandVector &Operands,
6393                                     bool Load, bool ARMMode, bool Writeback) {
6394   unsigned RtIndex = Load || !Writeback ? 0 : 1;
6395   unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
6396   unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());
6397 
6398   if (ARMMode) {
6399     // Rt can't be R14.
6400     if (Rt == 14)
6401       return Error(Operands[3]->getStartLoc(),
6402                   "Rt can't be R14");
6403 
6404     // Rt must be even-numbered.
6405     if ((Rt & 1) == 1)
6406       return Error(Operands[3]->getStartLoc(),
6407                    "Rt must be even-numbered");
6408 
6409     // Rt2 must be Rt + 1.
6410     if (Rt2 != Rt + 1) {
6411       if (Load)
6412         return Error(Operands[3]->getStartLoc(),
6413                      "destination operands must be sequential");
6414       else
6415         return Error(Operands[3]->getStartLoc(),
6416                      "source operands must be sequential");
6417     }
6418 
6419     // FIXME: Diagnose m == 15
6420     // FIXME: Diagnose ldrd with m == t || m == t2.
6421   }
6422 
6423   if (!ARMMode && Load) {
6424     if (Rt2 == Rt)
6425       return Error(Operands[3]->getStartLoc(),
6426                    "destination operands can't be identical");
6427   }
6428 
6429   if (Writeback) {
6430     unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6431 
6432     if (Rn == Rt || Rn == Rt2) {
6433       if (Load)
6434         return Error(Operands[3]->getStartLoc(),
6435                      "base register needs to be different from destination "
6436                      "registers");
6437       else
6438         return Error(Operands[3]->getStartLoc(),
6439                      "source register and base register can't be identical");
6440     }
6441 
6442     // FIXME: Diagnose ldrd/strd with writeback and n == 15.
6443     // (Except the immediate form of ldrd?)
6444   }
6445 
6446   return false;
6447 }
6448 
6449 
6450 // FIXME: We would really like to be able to tablegen'erate this.
6451 bool ARMAsmParser::validateInstruction(MCInst &Inst,
6452                                        const OperandVector &Operands) {
6453   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
6454   SMLoc Loc = Operands[0]->getStartLoc();
6455 
6456   // Check the IT block state first.
6457   // NOTE: BKPT and HLT instructions have the interesting property of being
6458   // allowed in IT blocks, but not being predicable. They just always execute.
6459   if (inITBlock() && !instIsBreakpoint(Inst)) {
6460     // The instruction must be predicable.
6461     if (!MCID.isPredicable())
6462       return Error(Loc, "instructions in IT block must be predicable");
6463     ARMCC::CondCodes Cond = ARMCC::CondCodes(
6464         Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
6465     if (Cond != currentITCond()) {
6466       // Find the condition code Operand to get its SMLoc information.
6467       SMLoc CondLoc;
6468       for (unsigned I = 1; I < Operands.size(); ++I)
6469         if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
6470           CondLoc = Operands[I]->getStartLoc();
6471       return Error(CondLoc, "incorrect condition in IT block; got '" +
6472                                 StringRef(ARMCondCodeToString(Cond)) +
6473                                 "', but expected '" +
6474                                 ARMCondCodeToString(currentITCond()) + "'");
6475     }
6476   // Check for non-'al' condition codes outside of the IT block.
6477   } else if (isThumbTwo() && MCID.isPredicable() &&
6478              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6479              ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
6480              Inst.getOpcode() != ARM::t2Bcc) {
6481     return Error(Loc, "predicated instructions must be in IT block");
6482   } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
6483              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
6484                  ARMCC::AL) {
6485     return Warning(Loc, "predicated instructions should be in IT block");
6486   } else if (!MCID.isPredicable()) {
6487     // Check the instruction doesn't have a predicate operand anyway
6488     // that it's not allowed to use. Sometimes this happens in order
6489     // to keep instructions the same shape even though one cannot
6490     // legally be predicated, e.g. vmul.f16 vs vmul.f32.
6491     for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
6492       if (MCID.OpInfo[i].isPredicate()) {
6493         if (Inst.getOperand(i).getImm() != ARMCC::AL)
6494           return Error(Loc, "instruction is not predicable");
6495         break;
6496       }
6497     }
6498   }
6499 
6500   // PC-setting instructions in an IT block, but not the last instruction of
6501   // the block, are UNPREDICTABLE.
6502   if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
6503     return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
6504   }
6505 
6506   const unsigned Opcode = Inst.getOpcode();
6507   switch (Opcode) {
6508   case ARM::t2IT: {
6509     // Encoding is unpredictable if it ever results in a notional 'NV'
6510     // predicate. Since we don't parse 'NV' directly this means an 'AL'
6511     // predicate with an "else" mask bit.
6512     unsigned Cond = Inst.getOperand(0).getImm();
6513     unsigned Mask = Inst.getOperand(1).getImm();
6514 
6515     // Mask hasn't been modified to the IT instruction encoding yet so
6516     // conditions only allowing a 't' are a block of 1s starting at bit 3
6517     // followed by all 0s. Easiest way is to just list the 4 possibilities.
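    //
    // For example, 'ite al' is rejected here because its 'e' position would
    // imply the notional NV predicate.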
6518     if (Cond == ARMCC::AL && Mask != 8 && Mask != 12 && Mask != 14 &&
6519         Mask != 15)
6520       return Error(Loc, "unpredictable IT predicate sequence");
6521     break;
6522   }
6523   case ARM::LDRD:
6524     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
6525                          /*Writeback*/false))
6526       return true;
6527     break;
6528   case ARM::LDRD_PRE:
6529   case ARM::LDRD_POST:
6530     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
6531                          /*Writeback*/true))
6532       return true;
6533     break;
6534   case ARM::t2LDRDi8:
6535     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
6536                          /*Writeback*/false))
6537       return true;
6538     break;
6539   case ARM::t2LDRD_PRE:
6540   case ARM::t2LDRD_POST:
6541     if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
6542                          /*Writeback*/true))
6543       return true;
6544     break;
6545   case ARM::t2BXJ: {
6546     const unsigned RmReg = Inst.getOperand(0).getReg();
6547     // Rm = SP is no longer unpredictable in v8-A
6548     if (RmReg == ARM::SP && !hasV8Ops())
6549       return Error(Operands[2]->getStartLoc(),
6550                    "r13 (SP) is an unpredictable operand to BXJ");
6551     return false;
6552   }
6553   case ARM::STRD:
6554     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
6555                          /*Writeback*/false))
6556       return true;
6557     break;
6558   case ARM::STRD_PRE:
6559   case ARM::STRD_POST:
6560     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
6561                          /*Writeback*/true))
6562       return true;
6563     break;
6564   case ARM::t2STRD_PRE:
6565   case ARM::t2STRD_POST:
6566     if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
6567                          /*Writeback*/true))
6568       return true;
6569     break;
6570   case ARM::STR_PRE_IMM:
6571   case ARM::STR_PRE_REG:
6572   case ARM::t2STR_PRE:
6573   case ARM::STR_POST_IMM:
6574   case ARM::STR_POST_REG:
6575   case ARM::t2STR_POST:
6576   case ARM::STRH_PRE:
6577   case ARM::t2STRH_PRE:
6578   case ARM::STRH_POST:
6579   case ARM::t2STRH_POST:
6580   case ARM::STRB_PRE_IMM:
6581   case ARM::STRB_PRE_REG:
6582   case ARM::t2STRB_PRE:
6583   case ARM::STRB_POST_IMM:
6584   case ARM::STRB_POST_REG:
6585   case ARM::t2STRB_POST: {
6586     // Rt must be different from Rn.
6587     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6588     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6589 
6590     if (Rt == Rn)
6591       return Error(Operands[3]->getStartLoc(),
6592                    "source register and base register can't be identical");
6593     return false;
6594   }
6595   case ARM::LDR_PRE_IMM:
6596   case ARM::LDR_PRE_REG:
6597   case ARM::t2LDR_PRE:
6598   case ARM::LDR_POST_IMM:
6599   case ARM::LDR_POST_REG:
6600   case ARM::t2LDR_POST:
6601   case ARM::LDRH_PRE:
6602   case ARM::t2LDRH_PRE:
6603   case ARM::LDRH_POST:
6604   case ARM::t2LDRH_POST:
6605   case ARM::LDRSH_PRE:
6606   case ARM::t2LDRSH_PRE:
6607   case ARM::LDRSH_POST:
6608   case ARM::t2LDRSH_POST:
6609   case ARM::LDRB_PRE_IMM:
6610   case ARM::LDRB_PRE_REG:
6611   case ARM::t2LDRB_PRE:
6612   case ARM::LDRB_POST_IMM:
6613   case ARM::LDRB_POST_REG:
6614   case ARM::t2LDRB_POST:
6615   case ARM::LDRSB_PRE:
6616   case ARM::t2LDRSB_PRE:
6617   case ARM::LDRSB_POST:
6618   case ARM::t2LDRSB_POST: {
6619     // Rt must be different from Rn.
6620     const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6621     const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6622 
6623     if (Rt == Rn)
6624       return Error(Operands[3]->getStartLoc(),
6625                    "destination register and base register can't be identical");
6626     return false;
6627   }
6628   case ARM::SBFX:
6629   case ARM::t2SBFX:
6630   case ARM::UBFX:
6631   case ARM::t2UBFX: {
6632     // Width must be in range [1, 32-lsb].
6633     unsigned LSB = Inst.getOperand(2).getImm();
6634     unsigned Widthm1 = Inst.getOperand(3).getImm();
6635     if (Widthm1 >= 32 - LSB)
6636       return Error(Operands[5]->getStartLoc(),
6637                    "bitfield width must be in range [1,32-lsb]");
6638     return false;
6639   }
6640   // Notionally handles ARM::tLDMIA_UPD too.
6641   case ARM::tLDMIA: {
6642     // If we're parsing Thumb2, the .w variant is available and handles
6643     // most cases that are normally illegal for a Thumb1 LDM instruction.
6644     // We'll make the transformation in processInstruction() if necessary.
6645     //
6646     // Thumb LDM instructions are writeback iff the base register is not
6647     // in the register list.
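    //
    // For example, in Thumb1 'ldm r0, {r1, r2}' requires the writeback '!',
    // while 'ldm r0, {r0, r1}' must not have it.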
6648     unsigned Rn = Inst.getOperand(0).getReg();
6649     bool HasWritebackToken =
6650         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
6651          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
6652     bool ListContainsBase;
6653     if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
6654       return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
6655                    "registers must be in range r0-r7");
6656     // If we should have writeback, then there should be a '!' token.
6657     if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
6658       return Error(Operands[2]->getStartLoc(),
6659                    "writeback operator '!' expected");
6660     // If we should not have writeback, there must not be a '!'. This is
6661     // true even for the 32-bit wide encodings.
6662     if (ListContainsBase && HasWritebackToken)
6663       return Error(Operands[3]->getStartLoc(),
6664                    "writeback operator '!' not allowed when base register "
6665                    "in register list");
6666 
6667     if (validatetLDMRegList(Inst, Operands, 3))
6668       return true;
6669     break;
6670   }
6671   case ARM::LDMIA_UPD:
6672   case ARM::LDMDB_UPD:
6673   case ARM::LDMIB_UPD:
6674   case ARM::LDMDA_UPD:
6675     // ARM variants loading and updating the same register are only officially
6676     // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
6677     if (!hasV7Ops())
6678       break;
6679     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6680       return Error(Operands.back()->getStartLoc(),
6681                    "writeback register not allowed in register list");
6682     break;
6683   case ARM::t2LDMIA:
6684   case ARM::t2LDMDB:
6685     if (validatetLDMRegList(Inst, Operands, 3))
6686       return true;
6687     break;
6688   case ARM::t2STMIA:
6689   case ARM::t2STMDB:
6690     if (validatetSTMRegList(Inst, Operands, 3))
6691       return true;
6692     break;
6693   case ARM::t2LDMIA_UPD:
6694   case ARM::t2LDMDB_UPD:
6695   case ARM::t2STMIA_UPD:
6696   case ARM::t2STMDB_UPD:
6697     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
6698       return Error(Operands.back()->getStartLoc(),
6699                    "writeback register not allowed in register list");
6700 
6701     if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
6702       if (validatetLDMRegList(Inst, Operands, 3))
6703         return true;
6704     } else {
6705       if (validatetSTMRegList(Inst, Operands, 3))
6706         return true;
6707     }
6708     break;
6709 
6710   case ARM::sysLDMIA_UPD:
6711   case ARM::sysLDMDA_UPD:
6712   case ARM::sysLDMDB_UPD:
6713   case ARM::sysLDMIB_UPD:
6714     if (!listContainsReg(Inst, 3, ARM::PC))
6715       return Error(Operands[4]->getStartLoc(),
6716                    "writeback register only allowed on system LDM "
6717                    "if PC in register-list");
6718     break;
6719   case ARM::sysSTMIA_UPD:
6720   case ARM::sysSTMDA_UPD:
6721   case ARM::sysSTMDB_UPD:
6722   case ARM::sysSTMIB_UPD:
6723     return Error(Operands[2]->getStartLoc(),
6724                  "system STM cannot have writeback register");
6725   case ARM::tMUL:
6726     // The second source operand must be the same register as the destination
6727     // operand.
6728     //
6729     // In this case, we must directly check the parsed operands because the
6730     // cvtThumbMultiply() function is written in such a way that it guarantees
6731     // this first statement is always true for the new Inst.  Essentially, the
6732     // destination is unconditionally copied into the second source operand
6733     // without checking to see if it matches what we actually parsed.
6734     if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
6735                                  ((ARMOperand &)*Operands[5]).getReg()) &&
6736         (((ARMOperand &)*Operands[3]).getReg() !=
6737          ((ARMOperand &)*Operands[4]).getReg())) {
6738       return Error(Operands[3]->getStartLoc(),
6739                    "destination register must match source register");
6740     }
6741     break;
6742 
  // Like ldm/stm, push and pop have hi-reg handling versions in Thumb2,
6744   // so only issue a diagnostic for thumb1. The instructions will be
6745   // switched to the t2 encodings in processInstruction() if necessary.
6746   case ARM::tPOP: {
6747     bool ListContainsBase;
6748     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
6749         !isThumbTwo())
6750       return Error(Operands[2]->getStartLoc(),
6751                    "registers must be in range r0-r7 or pc");
6752     if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
6753       return true;
6754     break;
6755   }
6756   case ARM::tPUSH: {
6757     bool ListContainsBase;
6758     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
6759         !isThumbTwo())
6760       return Error(Operands[2]->getStartLoc(),
6761                    "registers must be in range r0-r7 or lr");
6762     if (validatetSTMRegList(Inst, Operands, 2))
6763       return true;
6764     break;
6765   }
6766   case ARM::tSTMIA_UPD: {
6767     bool ListContainsBase, InvalidLowList;
6768     InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
6769                                           0, ListContainsBase);
6770     if (InvalidLowList && !isThumbTwo())
6771       return Error(Operands[4]->getStartLoc(),
6772                    "registers must be in range r0-r7");
6773 
6774     // This would be converted to a 32-bit stm, but that's not valid if the
6775     // writeback register is in the list.
6776     if (InvalidLowList && ListContainsBase)
6777       return Error(Operands[4]->getStartLoc(),
6778                    "writeback operator '!' not allowed when base register "
6779                    "in register list");
6780 
6781     if (validatetSTMRegList(Inst, Operands, 4))
6782       return true;
6783     break;
6784   }
6785   case ARM::tADDrSP:
6786     // If the non-SP source operand and the destination operand are not the
6787     // same, we need thumb2 (for the wide encoding), or we have an error.
6788     if (!isThumbTwo() &&
6789         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
6790       return Error(Operands[4]->getStartLoc(),
6791                    "source register must be the same as destination");
6792     }
6793     break;
6794 
6795   case ARM::t2ADDri:
6796   case ARM::t2ADDri12:
6797   case ARM::t2ADDrr:
6798   case ARM::t2ADDrs:
6799   case ARM::t2SUBri:
6800   case ARM::t2SUBri12:
6801   case ARM::t2SUBrr:
6802   case ARM::t2SUBrs:
6803     if (Inst.getOperand(0).getReg() == ARM::SP &&
6804         Inst.getOperand(1).getReg() != ARM::SP)
6805       return Error(Operands[4]->getStartLoc(),
6806                    "source register must be sp if destination is sp");
6807     break;
6808 
6809   // Final range checking for Thumb unconditional branch instructions.
6810   case ARM::tB:
6811     if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
6812       return Error(Operands[2]->getStartLoc(), "branch target out of range");
6813     break;
6814   case ARM::t2B: {
6815     int op = (Operands[2]->isImm()) ? 2 : 3;
6816     if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
6817       return Error(Operands[op]->getStartLoc(), "branch target out of range");
6818     break;
6819   }
6820   // Final range checking for Thumb conditional branch instructions.
6821   case ARM::tBcc:
6822     if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
6823       return Error(Operands[2]->getStartLoc(), "branch target out of range");
6824     break;
6825   case ARM::t2Bcc: {
6826     int Op = (Operands[2]->isImm()) ? 2 : 3;
6827     if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
6828       return Error(Operands[Op]->getStartLoc(), "branch target out of range");
6829     break;
6830   }
6831   case ARM::tCBZ:
6832   case ARM::tCBNZ: {
6833     if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
6834       return Error(Operands[2]->getStartLoc(), "branch target out of range");
6835     break;
6836   }
6837   case ARM::MOVi16:
6838   case ARM::MOVTi16:
6839   case ARM::t2MOVi16:
6840   case ARM::t2MOVTi16:
6841     {
6842     // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
6843     // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16: as part of the expression.  We don't
6845     // want the behavior of silently truncating, which can be unexpected and
6846     // lead to bugs that are difficult to find since this is an easy mistake
6847     // to make.
6848     int i = (Operands[3]->isImm()) ? 3 : 4;
6849     ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
6850     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
6851     if (CE) break;
6852     const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
6853     if (!E) break;
6854     const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
6855     if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
6856                        ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
6857       return Error(
6858           Op.getStartLoc(),
6859           "immediate expression for mov requires :lower16: or :upper16");
6860     break;
6861   }
6862   case ARM::HINT:
6863   case ARM::t2HINT: {
6864     unsigned Imm8 = Inst.getOperand(0).getImm();
6865     unsigned Pred = Inst.getOperand(1).getImm();
6866     // ESB is not predicable (pred must be AL). Without the RAS extension, this
6867     // behaves as any other unallocated hint.
6868     if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
6869       return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
6870                                                "predicable, but condition "
6871                                                "code specified");
6872     if (Imm8 == 0x14 && Pred != ARMCC::AL)
6873       return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
6874                                                "predicable, but condition "
6875                                                "code specified");
6876     break;
6877   }
6878   case ARM::DSB:
6879   case ARM::t2DSB: {
6880 
6881     if (Inst.getNumOperands() < 2)
6882       break;
6883 
6884     unsigned Option = Inst.getOperand(0).getImm();
6885     unsigned Pred = Inst.getOperand(1).getImm();
6886 
6887     // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
6888     if (Option == 0 && Pred != ARMCC::AL)
6889       return Error(Operands[1]->getStartLoc(),
6890                    "instruction 'ssbb' is not predicable, but condition code "
6891                    "specified");
6892     if (Option == 4 && Pred != ARMCC::AL)
6893       return Error(Operands[1]->getStartLoc(),
6894                    "instruction 'pssbb' is not predicable, but condition code "
6895                    "specified");
6896     break;
6897   }
6898   case ARM::VMOVRRS: {
6899     // Source registers must be sequential.
6900     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
6901     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
6902     if (Sm1 != Sm + 1)
6903       return Error(Operands[5]->getStartLoc(),
6904                    "source operands must be sequential");
6905     break;
6906   }
6907   case ARM::VMOVSRR: {
6908     // Destination registers must be sequential.
6909     const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
6910     const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
6911     if (Sm1 != Sm + 1)
6912       return Error(Operands[3]->getStartLoc(),
6913                    "destination operands must be sequential");
6914     break;
6915   }
6916   case ARM::VLDMDIA:
6917   case ARM::VSTMDIA: {
6918     ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
6919     auto &RegList = Op.getRegList();
6920     if (RegList.size() < 1 || RegList.size() > 16)
6921       return Error(Operands[3]->getStartLoc(),
6922                    "list of registers must be at least 1 and at most 16");
6923     break;
6924   }
6925   }
6926 
6927   return false;
6928 }
6929 
6930 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch (Opc) {
6932   default: llvm_unreachable("unexpected opcode!");
6933   // VST1LN
6934   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
6935   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6936   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6937   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
6938   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
6939   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
6940   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
6941   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
6942   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
6943 
6944   // VST2LN
6945   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
6946   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6947   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6948   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6949   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6950 
6951   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
6952   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
6953   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
6954   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
6955   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
6956 
6957   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
6958   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
6959   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
6960   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
6961   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
6962 
6963   // VST3LN
6964   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
6965   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6966   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6967   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
6968   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6969   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
6970   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
6971   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
6972   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
6973   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
6974   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
6975   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
6976   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
6977   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
6978   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
6979 
6980   // VST3
6981   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
6982   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6983   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6984   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
6985   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6986   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6987   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
6988   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
6989   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
6990   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
6991   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
6992   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
6993   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
6994   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
6995   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
6996   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
6997   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
6998   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
6999 
7000   // VST4LN
7001   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
7002   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
7003   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
7004   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
7005   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
7006   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
7007   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
7008   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
7009   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
7010   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
7011   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
7012   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
7013   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
7014   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
7015   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
7016 
7017   // VST4
7018   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
7019   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
7020   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
7021   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
7022   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
7023   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
7024   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
7025   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
7026   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
7027   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
7028   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
7029   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
7030   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
7031   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
7032   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
7033   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
7034   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
7035   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
7036   }
7037 }
7038 
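// Counterpart of getRealVSTOpcode for the VLD* assembly pseudo-opcodes; again,
// Spacing reports the register-number stride of the expanded register list.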
7039 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
  switch (Opc) {
7041   default: llvm_unreachable("unexpected opcode!");
7042   // VLD1LN
7043   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
7044   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
7045   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
7046   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
7047   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
7048   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
7049   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
7050   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
7051   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
7052 
7053   // VLD2LN
7054   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
7055   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
7056   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
7057   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
7058   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
7059   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
7060   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
7061   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
7062   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
7063   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
7064   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
7065   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
7066   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
7067   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
7068   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
7069 
7070   // VLD3DUP
7071   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
7072   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
7073   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
7074   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
7075   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
7076   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
7077   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
7078   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
7079   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
7080   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
7081   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
7082   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
7083   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
7084   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
7085   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
7086   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
7087   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
7088   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
7089 
7090   // VLD3LN
7091   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
7092   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
7093   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
7094   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
7095   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
7096   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
7097   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
7098   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
7099   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
7100   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
7101   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
7102   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
7103   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
7104   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
7105   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
7106 
7107   // VLD3
7108   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
7109   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
7110   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
7111   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
7112   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
7113   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
7114   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
7115   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
7116   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
7117   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
7118   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
7119   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
7120   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
7121   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
7122   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
7123   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
7124   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
7125   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
7126 
7127   // VLD4LN
7128   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
7129   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
7130   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
7131   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
7132   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
7133   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
7134   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
7135   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
7136   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
7137   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
7138   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
7139   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
7140   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
7141   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
7142   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
7143 
7144   // VLD4DUP
7145   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
7146   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
7147   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
7148   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
7149   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
7150   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
7151   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
7152   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
7153   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
7154   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
7155   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
7156   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
7157   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
7158   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
7159   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
7160   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
7161   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
7162   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
7163 
7164   // VLD4
7165   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
7166   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
7167   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
7168   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
7169   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
7170   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
7171   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
7172   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
7173   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
7174   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
7175   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
7176   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
7177   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
7178   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
7179   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
7180   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
7181   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
7182   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
7183   }
7184 }
7185 
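// Rewrite assembly pseudo-instructions and convenience aliases into the MCInst
// forms the encoder expects. Returns true if Inst was modified, in which case
// the caller is expected to re-process the result so that individual rewrites
// can chain off one another.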
7186 bool ARMAsmParser::processInstruction(MCInst &Inst,
7187                                       const OperandVector &Operands,
7188                                       MCStreamer &Out) {
7189   // Check if we have the wide qualifier, because if it's present we
  // must avoid selecting a 16-bit Thumb instruction.
7191   bool HasWideQualifier = false;
7192   for (auto &Op : Operands) {
7193     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
7194     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
7195       HasWideQualifier = true;
7196       break;
7197     }
7198   }
7199 
7200   switch (Inst.getOpcode()) {
7201   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
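  // The parsed pseudo carries no offset operands, so the rewrite below also
  // materializes a zero register/immediate pair as the post-index offset to
  // match the real instruction's operand list.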
7202   case ARM::LDRT_POST:
7203   case ARM::LDRBT_POST: {
7204     const unsigned Opcode =
7205       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
7206                                            : ARM::LDRBT_POST_IMM;
7207     MCInst TmpInst;
7208     TmpInst.setOpcode(Opcode);
7209     TmpInst.addOperand(Inst.getOperand(0));
7210     TmpInst.addOperand(Inst.getOperand(1));
7211     TmpInst.addOperand(Inst.getOperand(1));
7212     TmpInst.addOperand(MCOperand::createReg(0));
7213     TmpInst.addOperand(MCOperand::createImm(0));
7214     TmpInst.addOperand(Inst.getOperand(2));
7215     TmpInst.addOperand(Inst.getOperand(3));
7216     Inst = TmpInst;
7217     return true;
7218   }
7219   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
7220   case ARM::STRT_POST:
7221   case ARM::STRBT_POST: {
7222     const unsigned Opcode =
7223       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
7224                                            : ARM::STRBT_POST_IMM;
7225     MCInst TmpInst;
7226     TmpInst.setOpcode(Opcode);
7227     TmpInst.addOperand(Inst.getOperand(1));
7228     TmpInst.addOperand(Inst.getOperand(0));
7229     TmpInst.addOperand(Inst.getOperand(1));
7230     TmpInst.addOperand(MCOperand::createReg(0));
7231     TmpInst.addOperand(MCOperand::createImm(0));
7232     TmpInst.addOperand(Inst.getOperand(2));
7233     TmpInst.addOperand(Inst.getOperand(3));
7234     Inst = TmpInst;
7235     return true;
7236   }
7237   // Alias for alternate form of 'ADR Rd, #imm' instruction.
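  // i.e. 'add Rd, pc, #imm' (or 'add Rd, pc, expr') is rewritten to ADR; the
  // mod_imm encoding is undone and PC-relative expressions are rebased below.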
7238   case ARM::ADDri: {
7239     if (Inst.getOperand(1).getReg() != ARM::PC ||
7240         Inst.getOperand(5).getReg() != 0 ||
7241         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
7242       return false;
7243     MCInst TmpInst;
7244     TmpInst.setOpcode(ARM::ADR);
7245     TmpInst.addOperand(Inst.getOperand(0));
7246     if (Inst.getOperand(2).isImm()) {
      // The immediate (mod_imm) will be in its encoded form; we must decode
      // it before passing it to the ADR instruction.
7249       unsigned Enc = Inst.getOperand(2).getImm();
7250       TmpInst.addOperand(MCOperand::createImm(
7251         ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
7252     } else {
      // Turn the PC-relative expression into an absolute expression.
      // Reading the PC yields the address of the current instruction + 8,
      // so the transform to ADR is biased by that amount.
7256       MCSymbol *Dot = getContext().createTempSymbol();
7257       Out.EmitLabel(Dot);
7258       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
7259       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
7260                                                      MCSymbolRefExpr::VK_None,
7261                                                      getContext());
7262       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
7263       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
7264                                                      getContext());
7265       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
7266                                                         getContext());
7267       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
7268     }
7269     TmpInst.addOperand(Inst.getOperand(3));
7270     TmpInst.addOperand(Inst.getOperand(4));
7271     Inst = TmpInst;
7272     return true;
7273   }
7274   // Aliases for alternate PC+imm syntax of LDR instructions.
7275   case ARM::t2LDRpcrel:
7276     // Select the narrow version if the immediate will fit.
7277     if (Inst.getOperand(1).getImm() > 0 &&
7278         Inst.getOperand(1).getImm() <= 0xff &&
7279         !HasWideQualifier)
7280       Inst.setOpcode(ARM::tLDRpci);
7281     else
7282       Inst.setOpcode(ARM::t2LDRpci);
7283     return true;
7284   case ARM::t2LDRBpcrel:
7285     Inst.setOpcode(ARM::t2LDRBpci);
7286     return true;
7287   case ARM::t2LDRHpcrel:
7288     Inst.setOpcode(ARM::t2LDRHpci);
7289     return true;
7290   case ARM::t2LDRSBpcrel:
7291     Inst.setOpcode(ARM::t2LDRSBpci);
7292     return true;
7293   case ARM::t2LDRSHpcrel:
7294     Inst.setOpcode(ARM::t2LDRSHpci);
7295     return true;
7296   case ARM::LDRConstPool:
7297   case ARM::tLDRConstPool:
7298   case ARM::t2LDRConstPool: {
    // The pseudo-instruction 'ldr rt, =immediate' is converted to a
    // 'mov rt, immediate' if the immediate is known and representable;
    // otherwise we create a constant pool entry and load from it.
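    // For example, 'ldr r0, =1' can become 'mov r0, #1' and
    // 'ldr r0, =0xffffff00' can become 'mvn r0, #0xff', while a symbolic
    // 'ldr r0, =foo' always goes through the constant pool.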
7302     MCInst TmpInst;
7303     if (Inst.getOpcode() == ARM::LDRConstPool)
7304       TmpInst.setOpcode(ARM::LDRi12);
7305     else if (Inst.getOpcode() == ARM::tLDRConstPool)
7306       TmpInst.setOpcode(ARM::tLDRpci);
7307     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
7308       TmpInst.setOpcode(ARM::t2LDRpci);
7309     const ARMOperand &PoolOperand =
7310       (HasWideQualifier ?
7311        static_cast<ARMOperand &>(*Operands[4]) :
7312        static_cast<ARMOperand &>(*Operands[3]));
7313     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
    // If SubExprVal is a constant, we may be able to use a MOV instead.
7315     if (isa<MCConstantExpr>(SubExprVal) &&
7316         Inst.getOperand(0).getReg() != ARM::PC &&
7317         Inst.getOperand(0).getReg() != ARM::SP) {
7318       int64_t Value =
7319         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7320       bool UseMov  = true;
7321       bool MovHasS = true;
7322       if (Inst.getOpcode() == ARM::LDRConstPool) {
7323         // ARM Constant
7324         if (ARM_AM::getSOImmVal(Value) != -1) {
7325           Value = ARM_AM::getSOImmVal(Value);
7326           TmpInst.setOpcode(ARM::MOVi);
7327         }
7328         else if (ARM_AM::getSOImmVal(~Value) != -1) {
7329           Value = ARM_AM::getSOImmVal(~Value);
7330           TmpInst.setOpcode(ARM::MVNi);
7331         }
        else if (hasV6T2Ops() &&
                 Value >= 0 && Value < 65536) {
7334           TmpInst.setOpcode(ARM::MOVi16);
7335           MovHasS = false;
7336         }
7337         else
7338           UseMov = false;
7339       }
7340       else {
7341         // Thumb/Thumb2 Constant
7342         if (hasThumb2() &&
7343             ARM_AM::getT2SOImmVal(Value) != -1)
7344           TmpInst.setOpcode(ARM::t2MOVi);
7345         else if (hasThumb2() &&
7346                  ARM_AM::getT2SOImmVal(~Value) != -1) {
7347           TmpInst.setOpcode(ARM::t2MVNi);
7348           Value = ~Value;
7349         }
        else if (hasV8MBaseline() &&
                 Value >= 0 && Value < 65536) {
7352           TmpInst.setOpcode(ARM::t2MOVi16);
7353           MovHasS = false;
7354         }
7355         else
7356           UseMov = false;
7357       }
7358       if (UseMov) {
7359         TmpInst.addOperand(Inst.getOperand(0));           // Rt
7360         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
7361         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
        TmpInst.addOperand(Inst.getOperand(3));           // CondCode reg
7363         if (MovHasS)
7364           TmpInst.addOperand(MCOperand::createReg(0));    // S
7365         Inst = TmpInst;
7366         return true;
7367       }
7368     }
    // No opportunity to use MOV/MVN; create a constant pool entry instead.
7370     const MCExpr *CPLoc =
7371       getTargetStreamer().addConstantPoolEntry(SubExprVal,
7372                                                PoolOperand.getStartLoc());
7373     TmpInst.addOperand(Inst.getOperand(0));           // Rt
7374     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7375     if (TmpInst.getOpcode() == ARM::LDRi12)
7376       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
7377     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
    TmpInst.addOperand(Inst.getOperand(3));           // CondCode reg
7379     Inst = TmpInst;
7380     return true;
7381   }
7382   // Handle NEON VST complex aliases.
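  // Each case below expands the pseudo's operand layout into the real
  // instruction's: the register list becomes explicit D registers
  // (Vd, Vd + Spacing, ...), and the base-register, alignment, optional
  // writeback and lane operands are reordered to match.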
7383   case ARM::VST1LNdWB_register_Asm_8:
7384   case ARM::VST1LNdWB_register_Asm_16:
7385   case ARM::VST1LNdWB_register_Asm_32: {
7386     MCInst TmpInst;
7387     // Shuffle the operands around so the lane index operand is in the
7388     // right place.
7389     unsigned Spacing;
7390     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7391     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7392     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7393     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7394     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7395     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7396     TmpInst.addOperand(Inst.getOperand(1)); // lane
7397     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7398     TmpInst.addOperand(Inst.getOperand(6));
7399     Inst = TmpInst;
7400     return true;
7401   }
7402 
7403   case ARM::VST2LNdWB_register_Asm_8:
7404   case ARM::VST2LNdWB_register_Asm_16:
7405   case ARM::VST2LNdWB_register_Asm_32:
7406   case ARM::VST2LNqWB_register_Asm_16:
7407   case ARM::VST2LNqWB_register_Asm_32: {
7408     MCInst TmpInst;
7409     // Shuffle the operands around so the lane index operand is in the
7410     // right place.
7411     unsigned Spacing;
7412     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7413     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7414     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7415     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7416     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7417     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7418     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7419                                             Spacing));
7420     TmpInst.addOperand(Inst.getOperand(1)); // lane
7421     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7422     TmpInst.addOperand(Inst.getOperand(6));
7423     Inst = TmpInst;
7424     return true;
7425   }
7426 
7427   case ARM::VST3LNdWB_register_Asm_8:
7428   case ARM::VST3LNdWB_register_Asm_16:
7429   case ARM::VST3LNdWB_register_Asm_32:
7430   case ARM::VST3LNqWB_register_Asm_16:
7431   case ARM::VST3LNqWB_register_Asm_32: {
7432     MCInst TmpInst;
7433     // Shuffle the operands around so the lane index operand is in the
7434     // right place.
7435     unsigned Spacing;
7436     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7437     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7438     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7439     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7440     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7441     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7442     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7443                                             Spacing));
7444     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7445                                             Spacing * 2));
7446     TmpInst.addOperand(Inst.getOperand(1)); // lane
7447     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7448     TmpInst.addOperand(Inst.getOperand(6));
7449     Inst = TmpInst;
7450     return true;
7451   }
7452 
7453   case ARM::VST4LNdWB_register_Asm_8:
7454   case ARM::VST4LNdWB_register_Asm_16:
7455   case ARM::VST4LNdWB_register_Asm_32:
7456   case ARM::VST4LNqWB_register_Asm_16:
7457   case ARM::VST4LNqWB_register_Asm_32: {
7458     MCInst TmpInst;
7459     // Shuffle the operands around so the lane index operand is in the
7460     // right place.
7461     unsigned Spacing;
7462     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7463     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7464     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7465     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7466     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7467     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7468     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7469                                             Spacing));
7470     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7471                                             Spacing * 2));
7472     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7473                                             Spacing * 3));
7474     TmpInst.addOperand(Inst.getOperand(1)); // lane
7475     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7476     TmpInst.addOperand(Inst.getOperand(6));
7477     Inst = TmpInst;
7478     return true;
7479   }
7480 
7481   case ARM::VST1LNdWB_fixed_Asm_8:
7482   case ARM::VST1LNdWB_fixed_Asm_16:
7483   case ARM::VST1LNdWB_fixed_Asm_32: {
7484     MCInst TmpInst;
7485     // Shuffle the operands around so the lane index operand is in the
7486     // right place.
7487     unsigned Spacing;
7488     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7489     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7490     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7491     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7492     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7493     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7494     TmpInst.addOperand(Inst.getOperand(1)); // lane
7495     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7496     TmpInst.addOperand(Inst.getOperand(5));
7497     Inst = TmpInst;
7498     return true;
7499   }
7500 
7501   case ARM::VST2LNdWB_fixed_Asm_8:
7502   case ARM::VST2LNdWB_fixed_Asm_16:
7503   case ARM::VST2LNdWB_fixed_Asm_32:
7504   case ARM::VST2LNqWB_fixed_Asm_16:
7505   case ARM::VST2LNqWB_fixed_Asm_32: {
7506     MCInst TmpInst;
7507     // Shuffle the operands around so the lane index operand is in the
7508     // right place.
7509     unsigned Spacing;
7510     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7511     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7512     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7513     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7514     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7515     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7516     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7517                                             Spacing));
7518     TmpInst.addOperand(Inst.getOperand(1)); // lane
7519     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7520     TmpInst.addOperand(Inst.getOperand(5));
7521     Inst = TmpInst;
7522     return true;
7523   }
7524 
7525   case ARM::VST3LNdWB_fixed_Asm_8:
7526   case ARM::VST3LNdWB_fixed_Asm_16:
7527   case ARM::VST3LNdWB_fixed_Asm_32:
7528   case ARM::VST3LNqWB_fixed_Asm_16:
7529   case ARM::VST3LNqWB_fixed_Asm_32: {
7530     MCInst TmpInst;
7531     // Shuffle the operands around so the lane index operand is in the
7532     // right place.
7533     unsigned Spacing;
7534     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7535     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7536     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7537     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7538     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7539     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7540     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7541                                             Spacing));
7542     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7543                                             Spacing * 2));
7544     TmpInst.addOperand(Inst.getOperand(1)); // lane
7545     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7546     TmpInst.addOperand(Inst.getOperand(5));
7547     Inst = TmpInst;
7548     return true;
7549   }
7550 
7551   case ARM::VST4LNdWB_fixed_Asm_8:
7552   case ARM::VST4LNdWB_fixed_Asm_16:
7553   case ARM::VST4LNdWB_fixed_Asm_32:
7554   case ARM::VST4LNqWB_fixed_Asm_16:
7555   case ARM::VST4LNqWB_fixed_Asm_32: {
7556     MCInst TmpInst;
7557     // Shuffle the operands around so the lane index operand is in the
7558     // right place.
7559     unsigned Spacing;
7560     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7561     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7562     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7563     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7564     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7565     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7566     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7567                                             Spacing));
7568     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7569                                             Spacing * 2));
7570     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7571                                             Spacing * 3));
7572     TmpInst.addOperand(Inst.getOperand(1)); // lane
7573     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7574     TmpInst.addOperand(Inst.getOperand(5));
7575     Inst = TmpInst;
7576     return true;
7577   }
7578 
7579   case ARM::VST1LNdAsm_8:
7580   case ARM::VST1LNdAsm_16:
7581   case ARM::VST1LNdAsm_32: {
7582     MCInst TmpInst;
7583     // Shuffle the operands around so the lane index operand is in the
7584     // right place.
7585     unsigned Spacing;
7586     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7587     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7588     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7589     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7590     TmpInst.addOperand(Inst.getOperand(1)); // lane
7591     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7592     TmpInst.addOperand(Inst.getOperand(5));
7593     Inst = TmpInst;
7594     return true;
7595   }
7596 
7597   case ARM::VST2LNdAsm_8:
7598   case ARM::VST2LNdAsm_16:
7599   case ARM::VST2LNdAsm_32:
7600   case ARM::VST2LNqAsm_16:
7601   case ARM::VST2LNqAsm_32: {
7602     MCInst TmpInst;
7603     // Shuffle the operands around so the lane index operand is in the
7604     // right place.
7605     unsigned Spacing;
7606     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7607     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7608     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7609     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7610     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7611                                             Spacing));
7612     TmpInst.addOperand(Inst.getOperand(1)); // lane
7613     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7614     TmpInst.addOperand(Inst.getOperand(5));
7615     Inst = TmpInst;
7616     return true;
7617   }
7618 
7619   case ARM::VST3LNdAsm_8:
7620   case ARM::VST3LNdAsm_16:
7621   case ARM::VST3LNdAsm_32:
7622   case ARM::VST3LNqAsm_16:
7623   case ARM::VST3LNqAsm_32: {
7624     MCInst TmpInst;
7625     // Shuffle the operands around so the lane index operand is in the
7626     // right place.
7627     unsigned Spacing;
7628     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7629     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7630     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7631     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7632     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7633                                             Spacing));
7634     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7635                                             Spacing * 2));
7636     TmpInst.addOperand(Inst.getOperand(1)); // lane
7637     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7638     TmpInst.addOperand(Inst.getOperand(5));
7639     Inst = TmpInst;
7640     return true;
7641   }
7642 
7643   case ARM::VST4LNdAsm_8:
7644   case ARM::VST4LNdAsm_16:
7645   case ARM::VST4LNdAsm_32:
7646   case ARM::VST4LNqAsm_16:
7647   case ARM::VST4LNqAsm_32: {
7648     MCInst TmpInst;
7649     // Shuffle the operands around so the lane index operand is in the
7650     // right place.
7651     unsigned Spacing;
7652     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7653     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7654     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7655     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7656     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7657                                             Spacing));
7658     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7659                                             Spacing * 2));
7660     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7661                                             Spacing * 3));
7662     TmpInst.addOperand(Inst.getOperand(1)); // lane
7663     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7664     TmpInst.addOperand(Inst.getOperand(5));
7665     Inst = TmpInst;
7666     return true;
7667   }
7668 
7669   // Handle NEON VLD complex aliases.
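  // Same expansion as the VST aliases above, except that the loads also
  // repeat the register list as the tied source operands.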
7670   case ARM::VLD1LNdWB_register_Asm_8:
7671   case ARM::VLD1LNdWB_register_Asm_16:
7672   case ARM::VLD1LNdWB_register_Asm_32: {
7673     MCInst TmpInst;
7674     // Shuffle the operands around so the lane index operand is in the
7675     // right place.
7676     unsigned Spacing;
7677     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7678     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7679     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7680     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7681     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7682     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7683     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7684     TmpInst.addOperand(Inst.getOperand(1)); // lane
7685     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7686     TmpInst.addOperand(Inst.getOperand(6));
7687     Inst = TmpInst;
7688     return true;
7689   }
7690 
7691   case ARM::VLD2LNdWB_register_Asm_8:
7692   case ARM::VLD2LNdWB_register_Asm_16:
7693   case ARM::VLD2LNdWB_register_Asm_32:
7694   case ARM::VLD2LNqWB_register_Asm_16:
7695   case ARM::VLD2LNqWB_register_Asm_32: {
7696     MCInst TmpInst;
7697     // Shuffle the operands around so the lane index operand is in the
7698     // right place.
7699     unsigned Spacing;
7700     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7701     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7702     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7703                                             Spacing));
7704     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7705     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7706     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7707     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7708     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7709     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7710                                             Spacing));
7711     TmpInst.addOperand(Inst.getOperand(1)); // lane
7712     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7713     TmpInst.addOperand(Inst.getOperand(6));
7714     Inst = TmpInst;
7715     return true;
7716   }
7717 
7718   case ARM::VLD3LNdWB_register_Asm_8:
7719   case ARM::VLD3LNdWB_register_Asm_16:
7720   case ARM::VLD3LNdWB_register_Asm_32:
7721   case ARM::VLD3LNqWB_register_Asm_16:
7722   case ARM::VLD3LNqWB_register_Asm_32: {
7723     MCInst TmpInst;
7724     // Shuffle the operands around so the lane index operand is in the
7725     // right place.
7726     unsigned Spacing;
7727     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7728     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7729     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7730                                             Spacing));
7731     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7732                                             Spacing * 2));
7733     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7734     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7735     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7736     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7737     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7738     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7739                                             Spacing));
7740     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7741                                             Spacing * 2));
7742     TmpInst.addOperand(Inst.getOperand(1)); // lane
7743     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7744     TmpInst.addOperand(Inst.getOperand(6));
7745     Inst = TmpInst;
7746     return true;
7747   }
7748 
7749   case ARM::VLD4LNdWB_register_Asm_8:
7750   case ARM::VLD4LNdWB_register_Asm_16:
7751   case ARM::VLD4LNdWB_register_Asm_32:
7752   case ARM::VLD4LNqWB_register_Asm_16:
7753   case ARM::VLD4LNqWB_register_Asm_32: {
7754     MCInst TmpInst;
7755     // Shuffle the operands around so the lane index operand is in the
7756     // right place.
7757     unsigned Spacing;
7758     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7759     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7760     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7761                                             Spacing));
7762     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7763                                             Spacing * 2));
7764     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7765                                             Spacing * 3));
7766     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7767     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7768     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7769     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7770     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7771     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7772                                             Spacing));
7773     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7774                                             Spacing * 2));
7775     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7776                                             Spacing * 3));
7777     TmpInst.addOperand(Inst.getOperand(1)); // lane
7778     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7779     TmpInst.addOperand(Inst.getOperand(6));
7780     Inst = TmpInst;
7781     return true;
7782   }
7783 
7784   case ARM::VLD1LNdWB_fixed_Asm_8:
7785   case ARM::VLD1LNdWB_fixed_Asm_16:
7786   case ARM::VLD1LNdWB_fixed_Asm_32: {
7787     MCInst TmpInst;
7788     // Shuffle the operands around so the lane index operand is in the
7789     // right place.
7790     unsigned Spacing;
7791     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7792     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7793     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7794     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7795     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7796     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7797     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7798     TmpInst.addOperand(Inst.getOperand(1)); // lane
7799     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7800     TmpInst.addOperand(Inst.getOperand(5));
7801     Inst = TmpInst;
7802     return true;
7803   }
7804 
7805   case ARM::VLD2LNdWB_fixed_Asm_8:
7806   case ARM::VLD2LNdWB_fixed_Asm_16:
7807   case ARM::VLD2LNdWB_fixed_Asm_32:
7808   case ARM::VLD2LNqWB_fixed_Asm_16:
7809   case ARM::VLD2LNqWB_fixed_Asm_32: {
7810     MCInst TmpInst;
7811     // Shuffle the operands around so the lane index operand is in the
7812     // right place.
7813     unsigned Spacing;
7814     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7815     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7816     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7817                                             Spacing));
7818     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7819     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7820     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7821     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7822     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7823     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7824                                             Spacing));
7825     TmpInst.addOperand(Inst.getOperand(1)); // lane
7826     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7827     TmpInst.addOperand(Inst.getOperand(5));
7828     Inst = TmpInst;
7829     return true;
7830   }
7831 
7832   case ARM::VLD3LNdWB_fixed_Asm_8:
7833   case ARM::VLD3LNdWB_fixed_Asm_16:
7834   case ARM::VLD3LNdWB_fixed_Asm_32:
7835   case ARM::VLD3LNqWB_fixed_Asm_16:
7836   case ARM::VLD3LNqWB_fixed_Asm_32: {
7837     MCInst TmpInst;
7838     // Shuffle the operands around so the lane index operand is in the
7839     // right place.
7840     unsigned Spacing;
7841     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7842     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7843     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7844                                             Spacing));
7845     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7846                                             Spacing * 2));
7847     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7848     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7849     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7850     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7851     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7852     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7853                                             Spacing));
7854     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7855                                             Spacing * 2));
7856     TmpInst.addOperand(Inst.getOperand(1)); // lane
7857     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7858     TmpInst.addOperand(Inst.getOperand(5));
7859     Inst = TmpInst;
7860     return true;
7861   }
7862 
7863   case ARM::VLD4LNdWB_fixed_Asm_8:
7864   case ARM::VLD4LNdWB_fixed_Asm_16:
7865   case ARM::VLD4LNdWB_fixed_Asm_32:
7866   case ARM::VLD4LNqWB_fixed_Asm_16:
7867   case ARM::VLD4LNqWB_fixed_Asm_32: {
7868     MCInst TmpInst;
7869     // Shuffle the operands around so the lane index operand is in the
7870     // right place.
7871     unsigned Spacing;
7872     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7873     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7874     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7875                                             Spacing));
7876     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7877                                             Spacing * 2));
7878     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7879                                             Spacing * 3));
7880     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7881     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7882     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7883     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7884     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7885     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7886                                             Spacing));
7887     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7888                                             Spacing * 2));
7889     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7890                                             Spacing * 3));
7891     TmpInst.addOperand(Inst.getOperand(1)); // lane
7892     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7893     TmpInst.addOperand(Inst.getOperand(5));
7894     Inst = TmpInst;
7895     return true;
7896   }
7897 
7898   case ARM::VLD1LNdAsm_8:
7899   case ARM::VLD1LNdAsm_16:
7900   case ARM::VLD1LNdAsm_32: {
7901     MCInst TmpInst;
7902     // Shuffle the operands around so the lane index operand is in the
7903     // right place.
7904     unsigned Spacing;
7905     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7906     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7907     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7908     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7909     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7910     TmpInst.addOperand(Inst.getOperand(1)); // lane
7911     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7912     TmpInst.addOperand(Inst.getOperand(5));
7913     Inst = TmpInst;
7914     return true;
7915   }
7916 
7917   case ARM::VLD2LNdAsm_8:
7918   case ARM::VLD2LNdAsm_16:
7919   case ARM::VLD2LNdAsm_32:
7920   case ARM::VLD2LNqAsm_16:
7921   case ARM::VLD2LNqAsm_32: {
7922     MCInst TmpInst;
7923     // Shuffle the operands around so the lane index operand is in the
7924     // right place.
7925     unsigned Spacing;
7926     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7927     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7928     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7929                                             Spacing));
7930     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7931     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7932     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7933     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7934                                             Spacing));
7935     TmpInst.addOperand(Inst.getOperand(1)); // lane
7936     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7937     TmpInst.addOperand(Inst.getOperand(5));
7938     Inst = TmpInst;
7939     return true;
7940   }
7941 
7942   case ARM::VLD3LNdAsm_8:
7943   case ARM::VLD3LNdAsm_16:
7944   case ARM::VLD3LNdAsm_32:
7945   case ARM::VLD3LNqAsm_16:
7946   case ARM::VLD3LNqAsm_32: {
7947     MCInst TmpInst;
7948     // Shuffle the operands around so the lane index operand is in the
7949     // right place.
7950     unsigned Spacing;
7951     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7952     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7953     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7954                                             Spacing));
7955     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7956                                             Spacing * 2));
7957     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7958     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7959     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7960     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7961                                             Spacing));
7962     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7963                                             Spacing * 2));
7964     TmpInst.addOperand(Inst.getOperand(1)); // lane
7965     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7966     TmpInst.addOperand(Inst.getOperand(5));
7967     Inst = TmpInst;
7968     return true;
7969   }
7970 
7971   case ARM::VLD4LNdAsm_8:
7972   case ARM::VLD4LNdAsm_16:
7973   case ARM::VLD4LNdAsm_32:
7974   case ARM::VLD4LNqAsm_16:
7975   case ARM::VLD4LNqAsm_32: {
7976     MCInst TmpInst;
7977     // Shuffle the operands around so the lane index operand is in the
7978     // right place.
7979     unsigned Spacing;
7980     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7981     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7982     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7983                                             Spacing));
7984     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7985                                             Spacing * 2));
7986     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7987                                             Spacing * 3));
7988     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7989     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7990     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7991     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7992                                             Spacing));
7993     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7994                                             Spacing * 2));
7995     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7996                                             Spacing * 3));
7997     TmpInst.addOperand(Inst.getOperand(1)); // lane
7998     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7999     TmpInst.addOperand(Inst.getOperand(5));
8000     Inst = TmpInst;
8001     return true;
8002   }
8003 
8004   // VLD3DUP single 3-element structure to all lanes instructions.
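  // e.g. 'vld3.8 {d0[], d1[], d2[]}, [r0]' (the all-lanes form is written
  // with empty lane brackets).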
8005   case ARM::VLD3DUPdAsm_8:
8006   case ARM::VLD3DUPdAsm_16:
8007   case ARM::VLD3DUPdAsm_32:
8008   case ARM::VLD3DUPqAsm_8:
8009   case ARM::VLD3DUPqAsm_16:
8010   case ARM::VLD3DUPqAsm_32: {
8011     MCInst TmpInst;
8012     unsigned Spacing;
8013     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8014     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8015     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8016                                             Spacing));
8017     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8018                                             Spacing * 2));
8019     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8020     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8021     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8022     TmpInst.addOperand(Inst.getOperand(4));
8023     Inst = TmpInst;
8024     return true;
8025   }
8026 
8027   case ARM::VLD3DUPdWB_fixed_Asm_8:
8028   case ARM::VLD3DUPdWB_fixed_Asm_16:
8029   case ARM::VLD3DUPdWB_fixed_Asm_32:
8030   case ARM::VLD3DUPqWB_fixed_Asm_8:
8031   case ARM::VLD3DUPqWB_fixed_Asm_16:
8032   case ARM::VLD3DUPqWB_fixed_Asm_32: {
8033     MCInst TmpInst;
8034     unsigned Spacing;
8035     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8036     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8037     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8038                                             Spacing));
8039     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8040                                             Spacing * 2));
8041     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8042     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8043     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8044     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8045     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8046     TmpInst.addOperand(Inst.getOperand(4));
8047     Inst = TmpInst;
8048     return true;
8049   }
8050 
8051   case ARM::VLD3DUPdWB_register_Asm_8:
8052   case ARM::VLD3DUPdWB_register_Asm_16:
8053   case ARM::VLD3DUPdWB_register_Asm_32:
8054   case ARM::VLD3DUPqWB_register_Asm_8:
8055   case ARM::VLD3DUPqWB_register_Asm_16:
8056   case ARM::VLD3DUPqWB_register_Asm_32: {
8057     MCInst TmpInst;
8058     unsigned Spacing;
8059     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8060     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8061     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8062                                             Spacing));
8063     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8064                                             Spacing * 2));
8065     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8066     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8067     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8068     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8069     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8070     TmpInst.addOperand(Inst.getOperand(5));
8071     Inst = TmpInst;
8072     return true;
8073   }
8074 
8075   // VLD3 multiple 3-element structure instructions.
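  // For example (illustrative): "vld3.8 {d0, d1, d2}, [r0]" carries only D0 in
  // the pseudo; the two trailing registers are reconstructed from Spacing here.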
8076   case ARM::VLD3dAsm_8:
8077   case ARM::VLD3dAsm_16:
8078   case ARM::VLD3dAsm_32:
8079   case ARM::VLD3qAsm_8:
8080   case ARM::VLD3qAsm_16:
8081   case ARM::VLD3qAsm_32: {
8082     MCInst TmpInst;
8083     unsigned Spacing;
8084     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8085     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8086     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8087                                             Spacing));
8088     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8089                                             Spacing * 2));
8090     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8091     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8092     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8093     TmpInst.addOperand(Inst.getOperand(4));
8094     Inst = TmpInst;
8095     return true;
8096   }
8097 
8098   case ARM::VLD3dWB_fixed_Asm_8:
8099   case ARM::VLD3dWB_fixed_Asm_16:
8100   case ARM::VLD3dWB_fixed_Asm_32:
8101   case ARM::VLD3qWB_fixed_Asm_8:
8102   case ARM::VLD3qWB_fixed_Asm_16:
8103   case ARM::VLD3qWB_fixed_Asm_32: {
8104     MCInst TmpInst;
8105     unsigned Spacing;
8106     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8107     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8108     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8109                                             Spacing));
8110     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8111                                             Spacing * 2));
8112     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8113     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8114     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8115     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8116     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8117     TmpInst.addOperand(Inst.getOperand(4));
8118     Inst = TmpInst;
8119     return true;
8120   }
8121 
8122   case ARM::VLD3dWB_register_Asm_8:
8123   case ARM::VLD3dWB_register_Asm_16:
8124   case ARM::VLD3dWB_register_Asm_32:
8125   case ARM::VLD3qWB_register_Asm_8:
8126   case ARM::VLD3qWB_register_Asm_16:
8127   case ARM::VLD3qWB_register_Asm_32: {
8128     MCInst TmpInst;
8129     unsigned Spacing;
8130     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8131     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8132     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8133                                             Spacing));
8134     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8135                                             Spacing * 2));
8136     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8137     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8138     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8139     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8140     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8141     TmpInst.addOperand(Inst.getOperand(5));
8142     Inst = TmpInst;
8143     return true;
8144   }
8145 
8146   // VLD4DUP single 4-element structure to all lanes instructions.
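  // Roughly: "vld4.8 {d0[], d1[], d2[], d3[]}, [r0]" (illustrative syntax) is
  // expanded so D1-D3 become explicit operands of the real all-lanes opcode.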
8147   case ARM::VLD4DUPdAsm_8:
8148   case ARM::VLD4DUPdAsm_16:
8149   case ARM::VLD4DUPdAsm_32:
8150   case ARM::VLD4DUPqAsm_8:
8151   case ARM::VLD4DUPqAsm_16:
8152   case ARM::VLD4DUPqAsm_32: {
8153     MCInst TmpInst;
8154     unsigned Spacing;
8155     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8156     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8157     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8158                                             Spacing));
8159     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8160                                             Spacing * 2));
8161     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8162                                             Spacing * 3));
8163     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8164     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8165     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8166     TmpInst.addOperand(Inst.getOperand(4));
8167     Inst = TmpInst;
8168     return true;
8169   }
8170 
8171   case ARM::VLD4DUPdWB_fixed_Asm_8:
8172   case ARM::VLD4DUPdWB_fixed_Asm_16:
8173   case ARM::VLD4DUPdWB_fixed_Asm_32:
8174   case ARM::VLD4DUPqWB_fixed_Asm_8:
8175   case ARM::VLD4DUPqWB_fixed_Asm_16:
8176   case ARM::VLD4DUPqWB_fixed_Asm_32: {
8177     MCInst TmpInst;
8178     unsigned Spacing;
8179     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8180     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8181     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8182                                             Spacing));
8183     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8184                                             Spacing * 2));
8185     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8186                                             Spacing * 3));
8187     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8188     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8189     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8190     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8191     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8192     TmpInst.addOperand(Inst.getOperand(4));
8193     Inst = TmpInst;
8194     return true;
8195   }
8196 
8197   case ARM::VLD4DUPdWB_register_Asm_8:
8198   case ARM::VLD4DUPdWB_register_Asm_16:
8199   case ARM::VLD4DUPdWB_register_Asm_32:
8200   case ARM::VLD4DUPqWB_register_Asm_8:
8201   case ARM::VLD4DUPqWB_register_Asm_16:
8202   case ARM::VLD4DUPqWB_register_Asm_32: {
8203     MCInst TmpInst;
8204     unsigned Spacing;
8205     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8206     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8207     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8208                                             Spacing));
8209     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8210                                             Spacing * 2));
8211     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8212                                             Spacing * 3));
8213     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8214     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8215     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8216     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8217     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8218     TmpInst.addOperand(Inst.getOperand(5));
8219     Inst = TmpInst;
8220     return true;
8221   }
8222 
8223   // VLD4 multiple 4-element structure instructions.
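  // For example (illustrative): "vld4.8 {d0, d1, d2, d3}, [r0]" keeps only D0
  // in the pseudo; D1-D3 are filled in from Spacing by this expansion.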
8224   case ARM::VLD4dAsm_8:
8225   case ARM::VLD4dAsm_16:
8226   case ARM::VLD4dAsm_32:
8227   case ARM::VLD4qAsm_8:
8228   case ARM::VLD4qAsm_16:
8229   case ARM::VLD4qAsm_32: {
8230     MCInst TmpInst;
8231     unsigned Spacing;
8232     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8233     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8234     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8235                                             Spacing));
8236     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8237                                             Spacing * 2));
8238     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8239                                             Spacing * 3));
8240     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8241     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8242     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8243     TmpInst.addOperand(Inst.getOperand(4));
8244     Inst = TmpInst;
8245     return true;
8246   }
8247 
8248   case ARM::VLD4dWB_fixed_Asm_8:
8249   case ARM::VLD4dWB_fixed_Asm_16:
8250   case ARM::VLD4dWB_fixed_Asm_32:
8251   case ARM::VLD4qWB_fixed_Asm_8:
8252   case ARM::VLD4qWB_fixed_Asm_16:
8253   case ARM::VLD4qWB_fixed_Asm_32: {
8254     MCInst TmpInst;
8255     unsigned Spacing;
8256     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8257     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8258     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8259                                             Spacing));
8260     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8261                                             Spacing * 2));
8262     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8263                                             Spacing * 3));
8264     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8265     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8266     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8267     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8268     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8269     TmpInst.addOperand(Inst.getOperand(4));
8270     Inst = TmpInst;
8271     return true;
8272   }
8273 
8274   case ARM::VLD4dWB_register_Asm_8:
8275   case ARM::VLD4dWB_register_Asm_16:
8276   case ARM::VLD4dWB_register_Asm_32:
8277   case ARM::VLD4qWB_register_Asm_8:
8278   case ARM::VLD4qWB_register_Asm_16:
8279   case ARM::VLD4qWB_register_Asm_32: {
8280     MCInst TmpInst;
8281     unsigned Spacing;
8282     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
8283     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8284     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8285                                             Spacing));
8286     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8287                                             Spacing * 2));
8288     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8289                                             Spacing * 3));
8290     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8291     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8292     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8293     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8294     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8295     TmpInst.addOperand(Inst.getOperand(5));
8296     Inst = TmpInst;
8297     return true;
8298   }
8299 
8300   // VST3 multiple 3-element structure instructions.
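  // For example (illustrative): "vst3.8 {d0, d1, d2}, [r0]" is expanded here.
  // Note that the real store opcodes take the address operands first, so Rn
  // and the alignment are added to TmpInst before the register list.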
8301   case ARM::VST3dAsm_8:
8302   case ARM::VST3dAsm_16:
8303   case ARM::VST3dAsm_32:
8304   case ARM::VST3qAsm_8:
8305   case ARM::VST3qAsm_16:
8306   case ARM::VST3qAsm_32: {
8307     MCInst TmpInst;
8308     unsigned Spacing;
8309     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8310     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8311     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8312     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8313     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8314                                             Spacing));
8315     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8316                                             Spacing * 2));
8317     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8318     TmpInst.addOperand(Inst.getOperand(4));
8319     Inst = TmpInst;
8320     return true;
8321   }
8322 
8323   case ARM::VST3dWB_fixed_Asm_8:
8324   case ARM::VST3dWB_fixed_Asm_16:
8325   case ARM::VST3dWB_fixed_Asm_32:
8326   case ARM::VST3qWB_fixed_Asm_8:
8327   case ARM::VST3qWB_fixed_Asm_16:
8328   case ARM::VST3qWB_fixed_Asm_32: {
8329     MCInst TmpInst;
8330     unsigned Spacing;
8331     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8332     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8333     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8334     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8335     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8336     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8337     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8338                                             Spacing));
8339     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8340                                             Spacing * 2));
8341     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8342     TmpInst.addOperand(Inst.getOperand(4));
8343     Inst = TmpInst;
8344     return true;
8345   }
8346 
8347   case ARM::VST3dWB_register_Asm_8:
8348   case ARM::VST3dWB_register_Asm_16:
8349   case ARM::VST3dWB_register_Asm_32:
8350   case ARM::VST3qWB_register_Asm_8:
8351   case ARM::VST3qWB_register_Asm_16:
8352   case ARM::VST3qWB_register_Asm_32: {
8353     MCInst TmpInst;
8354     unsigned Spacing;
8355     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8356     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8357     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8358     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8359     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8360     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8361     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8362                                             Spacing));
8363     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8364                                             Spacing * 2));
8365     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8366     TmpInst.addOperand(Inst.getOperand(5));
8367     Inst = TmpInst;
8368     return true;
8369   }
8370 
8371   // VST4 multiple 4-element structure instructions.
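  // For example (illustrative): "vst4.8 {d0, d1, d2, d3}, [r0]" follows the
  // same pattern as VST3 above, with a fourth register at Vd + Spacing * 3.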
8372   case ARM::VST4dAsm_8:
8373   case ARM::VST4dAsm_16:
8374   case ARM::VST4dAsm_32:
8375   case ARM::VST4qAsm_8:
8376   case ARM::VST4qAsm_16:
8377   case ARM::VST4qAsm_32: {
8378     MCInst TmpInst;
8379     unsigned Spacing;
8380     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8381     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8382     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8383     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8384     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8385                                             Spacing));
8386     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8387                                             Spacing * 2));
8388     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8389                                             Spacing * 3));
8390     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8391     TmpInst.addOperand(Inst.getOperand(4));
8392     Inst = TmpInst;
8393     return true;
8394   }
8395 
8396   case ARM::VST4dWB_fixed_Asm_8:
8397   case ARM::VST4dWB_fixed_Asm_16:
8398   case ARM::VST4dWB_fixed_Asm_32:
8399   case ARM::VST4qWB_fixed_Asm_8:
8400   case ARM::VST4qWB_fixed_Asm_16:
8401   case ARM::VST4qWB_fixed_Asm_32: {
8402     MCInst TmpInst;
8403     unsigned Spacing;
8404     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8405     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8406     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8407     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8408     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
8409     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8410     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8411                                             Spacing));
8412     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8413                                             Spacing * 2));
8414     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8415                                             Spacing * 3));
8416     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8417     TmpInst.addOperand(Inst.getOperand(4));
8418     Inst = TmpInst;
8419     return true;
8420   }
8421 
8422   case ARM::VST4dWB_register_Asm_8:
8423   case ARM::VST4dWB_register_Asm_16:
8424   case ARM::VST4dWB_register_Asm_32:
8425   case ARM::VST4qWB_register_Asm_8:
8426   case ARM::VST4qWB_register_Asm_16:
8427   case ARM::VST4qWB_register_Asm_32: {
8428     MCInst TmpInst;
8429     unsigned Spacing;
8430     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
8431     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8432     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
8433     TmpInst.addOperand(Inst.getOperand(2)); // alignment
8434     TmpInst.addOperand(Inst.getOperand(3)); // Rm
8435     TmpInst.addOperand(Inst.getOperand(0)); // Vd
8436     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8437                                             Spacing));
8438     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8439                                             Spacing * 2));
8440     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
8441                                             Spacing * 3));
8442     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8443     TmpInst.addOperand(Inst.getOperand(5));
8444     Inst = TmpInst;
8445     return true;
8446   }
8447 
8448   // Handle encoding choice for the shift-immediate instructions.
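  // Sketch of the intent: a flag-setting shift such as "lsls r0, r1, #2"
  // outside an IT block (or the non-flag-setting form inside one) can use the
  // 16-bit Thumb1 encoding instead of the 32-bit Thumb2 one, provided both
  // registers are low and no ".w" qualifier was given.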
8449   case ARM::t2LSLri:
8450   case ARM::t2LSRri:
8451   case ARM::t2ASRri:
8452     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8453         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8454         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8455         !HasWideQualifier) {
8456       unsigned NewOpc;
8457       switch (Inst.getOpcode()) {
8458       default: llvm_unreachable("unexpected opcode");
8459       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
8460       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
8461       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
8462       }
8463       // The Thumb1 operands aren't in the same order. Awesome, eh?
8464       MCInst TmpInst;
8465       TmpInst.setOpcode(NewOpc);
8466       TmpInst.addOperand(Inst.getOperand(0));
8467       TmpInst.addOperand(Inst.getOperand(5));
8468       TmpInst.addOperand(Inst.getOperand(1));
8469       TmpInst.addOperand(Inst.getOperand(2));
8470       TmpInst.addOperand(Inst.getOperand(3));
8471       TmpInst.addOperand(Inst.getOperand(4));
8472       Inst = TmpInst;
8473       return true;
8474     }
8475     return false;
8476 
8477   // Handle the Thumb2 mode MOV complex aliases.
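  // Roughly: a register-shifted move such as "mov r0, r0, lsl r1" is rewritten
  // as the corresponding shift instruction (here LSL(S) r0, r0, r1), narrowed
  // to the 16-bit encoding when the operand and IT-block constraints permit.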
8478   case ARM::t2MOVsr:
8479   case ARM::t2MOVSsr: {
8480     // Which instruction to expand to depends on the CCOut operand and
8481     // whether we're in an IT block if the register operands are low
8482     // registers.
8483     bool isNarrow = false;
8484     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8485         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8486         isARMLowRegister(Inst.getOperand(2).getReg()) &&
8487         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8488         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
8489         !HasWideQualifier)
8490       isNarrow = true;
8491     MCInst TmpInst;
8492     unsigned newOpc;
8493     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
8494     default: llvm_unreachable("unexpected opcode!");
8495     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
8496     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
8497     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
8498     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
8499     }
8500     TmpInst.setOpcode(newOpc);
8501     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8502     if (isNarrow)
8503       TmpInst.addOperand(MCOperand::createReg(
8504           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8505     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8506     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8507     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
8508     TmpInst.addOperand(Inst.getOperand(5));
8509     if (!isNarrow)
8510       TmpInst.addOperand(MCOperand::createReg(
8511           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
8512     Inst = TmpInst;
8513     return true;
8514   }
8515   case ARM::t2MOVsi:
8516   case ARM::t2MOVSsi: {
8517     // Which instruction to expand to depends on the CCOut operand and
8518     // whether we're in an IT block if the register operands are low
8519     // registers.
8520     bool isNarrow = false;
8521     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8522         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8523         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
8524         !HasWideQualifier)
8525       isNarrow = true;
8526     MCInst TmpInst;
8527     unsigned newOpc;
8528     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8529     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
8530     bool isMov = false;
8531     // MOV rd, rm, LSL #0 is actually a MOV instruction
8532     if (Shift == ARM_AM::lsl && Amount == 0) {
8533       isMov = true;
8534       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly documented as
8535       // encoding T2 of MOV (register) in the ARMv8-A and ARMv8-M manuals, and an
8536       // immediate of 0 is unpredictable in an IT block, so the 32-bit encoding T3
8537       // has to be used instead.
8538       if (inITBlock()) {
8539         isNarrow = false;
8540       }
8541       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
8542     } else {
8543       switch(Shift) {
8544       default: llvm_unreachable("unexpected opcode!");
8545       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
8546       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
8547       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
8548       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
8549       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
8550       }
8551     }
8552     if (Amount == 32) Amount = 0;
8553     TmpInst.setOpcode(newOpc);
8554     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8555     if (isNarrow && !isMov)
8556       TmpInst.addOperand(MCOperand::createReg(
8557           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8558     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8559     if (newOpc != ARM::t2RRX && !isMov)
8560       TmpInst.addOperand(MCOperand::createImm(Amount));
8561     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8562     TmpInst.addOperand(Inst.getOperand(4));
8563     if (!isNarrow)
8564       TmpInst.addOperand(MCOperand::createReg(
8565           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8566     Inst = TmpInst;
8567     return true;
8568   }
8569   // Handle the ARM mode MOV complex aliases.
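  // Roughly the reverse of the Thumb2 case above: in ARM mode "asr r0, r1, r2"
  // and friends are emitted as the flexible-operand form "mov r0, r1, asr r2"
  // (MOVsr for register shifts, MOVsi for immediate shifts below).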
8570   case ARM::ASRr:
8571   case ARM::LSRr:
8572   case ARM::LSLr:
8573   case ARM::RORr: {
8574     ARM_AM::ShiftOpc ShiftTy;
8575     switch(Inst.getOpcode()) {
8576     default: llvm_unreachable("unexpected opcode!");
8577     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
8578     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
8579     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
8580     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
8581     }
8582     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
8583     MCInst TmpInst;
8584     TmpInst.setOpcode(ARM::MOVsr);
8585     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8586     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8587     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8588     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8589     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8590     TmpInst.addOperand(Inst.getOperand(4));
8591     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8592     Inst = TmpInst;
8593     return true;
8594   }
8595   case ARM::ASRi:
8596   case ARM::LSRi:
8597   case ARM::LSLi:
8598   case ARM::RORi: {
8599     ARM_AM::ShiftOpc ShiftTy;
8600     switch(Inst.getOpcode()) {
8601     default: llvm_unreachable("unexpected opcode!");
8602     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
8603     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
8604     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
8605     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
8606     }
8607     // A shift by zero is a plain MOVr, not a MOVsi.
8608     unsigned Amt = Inst.getOperand(2).getImm();
8609     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
8610     // A shift by 32 should be encoded as 0 when permitted
8611     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
8612       Amt = 0;
8613     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
8614     MCInst TmpInst;
8615     TmpInst.setOpcode(Opc);
8616     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8617     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8618     if (Opc == ARM::MOVsi)
8619       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8620     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8621     TmpInst.addOperand(Inst.getOperand(4));
8622     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8623     Inst = TmpInst;
8624     return true;
8625   }
8626   case ARM::RRXi: {
8627     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8628     MCInst TmpInst;
8629     TmpInst.setOpcode(ARM::MOVsi);
8630     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8631     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8632     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8633     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8634     TmpInst.addOperand(Inst.getOperand(3));
8635     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8636     Inst = TmpInst;
8637     return true;
8638   }
8639   case ARM::t2LDMIA_UPD: {
8640     // If this is a load of a single register, then we should use
8641     // a post-indexed LDR instruction instead, per the ARM ARM.
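    // For example (illustrative): "ldmia r0!, {r1}" becomes "ldr r1, [r0], #4".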
8642     if (Inst.getNumOperands() != 5)
8643       return false;
8644     MCInst TmpInst;
8645     TmpInst.setOpcode(ARM::t2LDR_POST);
8646     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8647     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8648     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8649     TmpInst.addOperand(MCOperand::createImm(4));
8650     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8651     TmpInst.addOperand(Inst.getOperand(3));
8652     Inst = TmpInst;
8653     return true;
8654   }
8655   case ARM::t2STMDB_UPD: {
8656     // If this is a store of a single register, then we should use
8657     // a pre-indexed STR instruction instead, per the ARM ARM.
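    // For example (illustrative): "stmdb r0!, {r1}" becomes
    // "str r1, [r0, #-4]!".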
8658     if (Inst.getNumOperands() != 5)
8659       return false;
8660     MCInst TmpInst;
8661     TmpInst.setOpcode(ARM::t2STR_PRE);
8662     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8663     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8664     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8665     TmpInst.addOperand(MCOperand::createImm(-4));
8666     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8667     TmpInst.addOperand(Inst.getOperand(3));
8668     Inst = TmpInst;
8669     return true;
8670   }
8671   case ARM::LDMIA_UPD:
8672     // If this is a load of a single register via a 'pop', then we should use
8673     // a post-indexed LDR instruction instead, per the ARM ARM.
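    // For example (illustrative): "pop {r0}" in ARM mode becomes
    // "ldr r0, [sp], #4".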
8674     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8675         Inst.getNumOperands() == 5) {
8676       MCInst TmpInst;
8677       TmpInst.setOpcode(ARM::LDR_POST_IMM);
8678       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8679       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8680       TmpInst.addOperand(Inst.getOperand(1)); // Rn
8681       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
8682       TmpInst.addOperand(MCOperand::createImm(4));
8683       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8684       TmpInst.addOperand(Inst.getOperand(3));
8685       Inst = TmpInst;
8686       return true;
8687     }
8688     break;
8689   case ARM::STMDB_UPD:
8690     // If this is a store of a single register via a 'push', then we should use
8691     // a pre-indexed STR instruction instead, per the ARM ARM.
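    // For example (illustrative): "push {r0}" in ARM mode becomes
    // "str r0, [sp, #-4]!".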
8692     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8693         Inst.getNumOperands() == 5) {
8694       MCInst TmpInst;
8695       TmpInst.setOpcode(ARM::STR_PRE_IMM);
8696       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8697       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8698       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8699       TmpInst.addOperand(MCOperand::createImm(-4));
8700       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8701       TmpInst.addOperand(Inst.getOperand(3));
8702       Inst = TmpInst;
8703     }
8704     break;
8705   case ARM::t2ADDri12:
8706     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8707     // mnemonic was used (not "addw"), encoding T3 is preferred.
8708     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8709         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8710       break;
8711     Inst.setOpcode(ARM::t2ADDri);
8712     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8713     break;
8714   case ARM::t2SUBri12:
8715     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8716     // mnemonic was used (not "subw"), encoding T3 is preferred.
8717     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8718         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8719       break;
8720     Inst.setOpcode(ARM::t2SUBri);
8721     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8722     break;
8723   case ARM::tADDi8:
8724     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8725     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8726     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8727     // to encoding T1 if <Rd> is omitted."
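    // For example (illustrative): "adds r0, r0, #3" (Rd written explicitly)
    // narrows to tADDi3, while "adds r0, #3" stays as tADDi8.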
8728     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8729       Inst.setOpcode(ARM::tADDi3);
8730       return true;
8731     }
8732     break;
8733   case ARM::tSUBi8:
8734     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
8735     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8736     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8737     // to encoding T1 if <Rd> is omitted."
8738     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8739       Inst.setOpcode(ARM::tSUBi3);
8740       return true;
8741     }
8742     break;
8743   case ARM::t2ADDri:
8744   case ARM::t2SUBri: {
8745     // If the destination and first source operand are the same, and
8746     // the flags are compatible with the current IT status, use encoding T2
8747     // instead of T3. For compatibility with the system 'as'. Make sure the
8748     // wide encoding wasn't explicit.
8749     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8750         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8751         (Inst.getOperand(2).isImm() &&
8752          (unsigned)Inst.getOperand(2).getImm() > 255) ||
8753         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
8754         HasWideQualifier)
8755       break;
8756     MCInst TmpInst;
8757     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8758                       ARM::tADDi8 : ARM::tSUBi8);
8759     TmpInst.addOperand(Inst.getOperand(0));
8760     TmpInst.addOperand(Inst.getOperand(5));
8761     TmpInst.addOperand(Inst.getOperand(0));
8762     TmpInst.addOperand(Inst.getOperand(2));
8763     TmpInst.addOperand(Inst.getOperand(3));
8764     TmpInst.addOperand(Inst.getOperand(4));
8765     Inst = TmpInst;
8766     return true;
8767   }
8768   case ARM::t2ADDrr: {
8769     // If the destination and first source operand are the same, and
8770     // there's no setting of the flags, use encoding T2 instead of T3.
8771     // Note that this is only for ADD, not SUB. This mirrors the system
8772     // 'as' behaviour.  Also take advantage of ADD being commutative.
8773     // Make sure the wide encoding wasn't explicit.
8774     bool Swap = false;
8775     auto DestReg = Inst.getOperand(0).getReg();
8776     bool Transform = DestReg == Inst.getOperand(1).getReg();
8777     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
8778       Transform = true;
8779       Swap = true;
8780     }
8781     if (!Transform ||
8782         Inst.getOperand(5).getReg() != 0 ||
8783         HasWideQualifier)
8784       break;
8785     MCInst TmpInst;
8786     TmpInst.setOpcode(ARM::tADDhirr);
8787     TmpInst.addOperand(Inst.getOperand(0));
8788     TmpInst.addOperand(Inst.getOperand(0));
8789     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
8790     TmpInst.addOperand(Inst.getOperand(3));
8791     TmpInst.addOperand(Inst.getOperand(4));
8792     Inst = TmpInst;
8793     return true;
8794   }
8795   case ARM::tADDrSP:
8796     // If the non-SP source operand and the destination operand are not the
8797     // same, we need to use the 32-bit encoding if it's available.
8798     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8799       Inst.setOpcode(ARM::t2ADDrr);
8800       Inst.addOperand(MCOperand::createReg(0)); // cc_out
8801       return true;
8802     }
8803     break;
8804   case ARM::tB:
8805     // A Thumb conditional branch outside of an IT block is a tBcc.
8806     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8807       Inst.setOpcode(ARM::tBcc);
8808       return true;
8809     }
8810     break;
8811   case ARM::t2B:
8812     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8813     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8814       Inst.setOpcode(ARM::t2Bcc);
8815       return true;
8816     }
8817     break;
8818   case ARM::t2Bcc:
8819     // If the conditional is AL or we're in an IT block, we really want t2B.
8820     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8821       Inst.setOpcode(ARM::t2B);
8822       return true;
8823     }
8824     break;
8825   case ARM::tBcc:
8826     // If the conditional is AL, we really want tB.
8827     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8828       Inst.setOpcode(ARM::tB);
8829       return true;
8830     }
8831     break;
8832   case ARM::tLDMIA: {
8833     // If the register list contains any high registers, or if the writeback
8834     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8835     // instead if we're in Thumb2. Otherwise, this should have generated
8836     // an error in validateInstruction().
8837     unsigned Rn = Inst.getOperand(0).getReg();
8838     bool hasWritebackToken =
8839         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8840          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8841     bool listContainsBase;
8842     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8843         (!listContainsBase && !hasWritebackToken) ||
8844         (listContainsBase && hasWritebackToken)) {
8845       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8846       assert(isThumbTwo());
8847       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8848       // If we're switching to the updating version, we need to insert
8849       // the writeback tied operand.
8850       if (hasWritebackToken)
8851         Inst.insert(Inst.begin(),
8852                     MCOperand::createReg(Inst.getOperand(0).getReg()));
8853       return true;
8854     }
8855     break;
8856   }
8857   case ARM::tSTMIA_UPD: {
8858     // If the register list contains any high registers, we need to use
8859     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8860     // should have generated an error in validateInstruction().
8861     unsigned Rn = Inst.getOperand(0).getReg();
8862     bool listContainsBase;
8863     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8864       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8865       assert(isThumbTwo());
8866       Inst.setOpcode(ARM::t2STMIA_UPD);
8867       return true;
8868     }
8869     break;
8870   }
8871   case ARM::tPOP: {
8872     bool listContainsBase;
8873     // If the register list contains any high registers, we need to use
8874     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8875     // should have generated an error in validateInstruction().
8876     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8877       return false;
8878     assert(isThumbTwo());
8879     Inst.setOpcode(ARM::t2LDMIA_UPD);
8880     // Add the base register and writeback operands.
8881     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8882     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8883     return true;
8884   }
8885   case ARM::tPUSH: {
8886     bool listContainsBase;
8887     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8888       return false;
8889     assert(isThumbTwo());
8890     Inst.setOpcode(ARM::t2STMDB_UPD);
8891     // Add the base register and writeback operands.
8892     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8893     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8894     return true;
8895   }
8896   case ARM::t2MOVi:
8897     // If we can use the 16-bit encoding and the user didn't explicitly
8898     // request the 32-bit variant, transform it here.
8899     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8900         (Inst.getOperand(1).isImm() &&
8901          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
8902         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8903         !HasWideQualifier) {
8904       // The operands aren't in the same order for tMOVi8...
8905       MCInst TmpInst;
8906       TmpInst.setOpcode(ARM::tMOVi8);
8907       TmpInst.addOperand(Inst.getOperand(0));
8908       TmpInst.addOperand(Inst.getOperand(4));
8909       TmpInst.addOperand(Inst.getOperand(1));
8910       TmpInst.addOperand(Inst.getOperand(2));
8911       TmpInst.addOperand(Inst.getOperand(3));
8912       Inst = TmpInst;
8913       return true;
8914     }
8915     break;
8916 
8917   case ARM::t2MOVr:
8918     // If we can use the 16-bit encoding and the user didn't explicitly
8919     // request the 32-bit variant, transform it here.
8920     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8921         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8922         Inst.getOperand(2).getImm() == ARMCC::AL &&
8923         Inst.getOperand(4).getReg() == ARM::CPSR &&
8924         !HasWideQualifier) {
8925       // The operands aren't the same for tMOV[S]r... (no cc_out)
8926       MCInst TmpInst;
8927       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8928       TmpInst.addOperand(Inst.getOperand(0));
8929       TmpInst.addOperand(Inst.getOperand(1));
8930       TmpInst.addOperand(Inst.getOperand(2));
8931       TmpInst.addOperand(Inst.getOperand(3));
8932       Inst = TmpInst;
8933       return true;
8934     }
8935     break;
8936 
8937   case ARM::t2SXTH:
8938   case ARM::t2SXTB:
8939   case ARM::t2UXTH:
8940   case ARM::t2UXTB:
8941     // If we can use the 16-bit encoding and the user didn't explicitly
8942     // request the 32-bit variant, transform it here.
8943     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8944         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8945         Inst.getOperand(2).getImm() == 0 &&
8946         !HasWideQualifier) {
8947       unsigned NewOpc;
8948       switch (Inst.getOpcode()) {
8949       default: llvm_unreachable("Illegal opcode!");
8950       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8951       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8952       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8953       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8954       }
8955       // The operands aren't the same for thumb1 (no rotate operand).
8956       MCInst TmpInst;
8957       TmpInst.setOpcode(NewOpc);
8958       TmpInst.addOperand(Inst.getOperand(0));
8959       TmpInst.addOperand(Inst.getOperand(1));
8960       TmpInst.addOperand(Inst.getOperand(3));
8961       TmpInst.addOperand(Inst.getOperand(4));
8962       Inst = TmpInst;
8963       return true;
8964     }
8965     break;
8966 
8967   case ARM::MOVsi: {
8968     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8969     // rrx shifts and asr/lsr of #32 are encoded as 0
8970     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8971       return false;
8972     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8973       // Shifting by zero is accepted as a vanilla 'MOVr'
8974       MCInst TmpInst;
8975       TmpInst.setOpcode(ARM::MOVr);
8976       TmpInst.addOperand(Inst.getOperand(0));
8977       TmpInst.addOperand(Inst.getOperand(1));
8978       TmpInst.addOperand(Inst.getOperand(3));
8979       TmpInst.addOperand(Inst.getOperand(4));
8980       TmpInst.addOperand(Inst.getOperand(5));
8981       Inst = TmpInst;
8982       return true;
8983     }
8984     return false;
8985   }
8986   case ARM::ANDrsi:
8987   case ARM::ORRrsi:
8988   case ARM::EORrsi:
8989   case ARM::BICrsi:
8990   case ARM::SUBrsi:
8991   case ARM::ADDrsi: {
8992     unsigned newOpc;
8993     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8994     if (SOpc == ARM_AM::rrx) return false;
8995     switch (Inst.getOpcode()) {
8996     default: llvm_unreachable("unexpected opcode!");
8997     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8998     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8999     case ARM::EORrsi: newOpc = ARM::EORrr; break;
9000     case ARM::BICrsi: newOpc = ARM::BICrr; break;
9001     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
9002     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
9003     }
9004     // If the shift is by zero, use the non-shifted instruction definition.
9005     // The exception is for right shifts, where 0 == 32
9006     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
9007         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
9008       MCInst TmpInst;
9009       TmpInst.setOpcode(newOpc);
9010       TmpInst.addOperand(Inst.getOperand(0));
9011       TmpInst.addOperand(Inst.getOperand(1));
9012       TmpInst.addOperand(Inst.getOperand(2));
9013       TmpInst.addOperand(Inst.getOperand(4));
9014       TmpInst.addOperand(Inst.getOperand(5));
9015       TmpInst.addOperand(Inst.getOperand(6));
9016       Inst = TmpInst;
9017       return true;
9018     }
9019     return false;
9020   }
9021   case ARM::ITasm:
9022   case ARM::t2IT: {
9023     MCOperand &MO = Inst.getOperand(1);
9024     unsigned Mask = MO.getImm();
9025     ARMCC::CondCodes Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
9026 
9027     // Set up the IT block state according to the IT instruction we just
9028     // matched.
9029     assert(!inITBlock() && "nested IT blocks?!");
9030     startExplicitITBlock(Cond, Mask);
9031     MO.setImm(getITMaskEncoding());
9032     break;
9033   }
9034   case ARM::t2LSLrr:
9035   case ARM::t2LSRrr:
9036   case ARM::t2ASRrr:
9037   case ARM::t2SBCrr:
9038   case ARM::t2RORrr:
9039   case ARM::t2BICrr:
9040     // Assemblers should use the narrow encodings of these instructions when permissible.
9041     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
9042          isARMLowRegister(Inst.getOperand(2).getReg())) &&
9043         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
9044         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9045         !HasWideQualifier) {
9046       unsigned NewOpc;
9047       switch (Inst.getOpcode()) {
9048         default: llvm_unreachable("unexpected opcode");
9049         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
9050         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
9051         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
9052         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
9053         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
9054         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
9055       }
9056       MCInst TmpInst;
9057       TmpInst.setOpcode(NewOpc);
9058       TmpInst.addOperand(Inst.getOperand(0));
9059       TmpInst.addOperand(Inst.getOperand(5));
9060       TmpInst.addOperand(Inst.getOperand(1));
9061       TmpInst.addOperand(Inst.getOperand(2));
9062       TmpInst.addOperand(Inst.getOperand(3));
9063       TmpInst.addOperand(Inst.getOperand(4));
9064       Inst = TmpInst;
9065       return true;
9066     }
9067     return false;
9068 
9069   case ARM::t2ANDrr:
9070   case ARM::t2EORrr:
9071   case ARM::t2ADCrr:
9072   case ARM::t2ORRrr:
9073     // Assemblers should use the narrow encodings of these instructions when permissible.
9074     // These instructions are special in that they are commutable, so shorter encodings
9075     // are available more often.
9076     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
9077          isARMLowRegister(Inst.getOperand(2).getReg())) &&
9078         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
9079          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
9080         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
9081         !HasWideQualifier) {
9082       unsigned NewOpc;
9083       switch (Inst.getOpcode()) {
9084         default: llvm_unreachable("unexpected opcode");
9085         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
9086         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
9087         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
9088         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
9089       }
9090       MCInst TmpInst;
9091       TmpInst.setOpcode(NewOpc);
9092       TmpInst.addOperand(Inst.getOperand(0));
9093       TmpInst.addOperand(Inst.getOperand(5));
9094       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
9095         TmpInst.addOperand(Inst.getOperand(1));
9096         TmpInst.addOperand(Inst.getOperand(2));
9097       } else {
9098         TmpInst.addOperand(Inst.getOperand(2));
9099         TmpInst.addOperand(Inst.getOperand(1));
9100       }
9101       TmpInst.addOperand(Inst.getOperand(3));
9102       TmpInst.addOperand(Inst.getOperand(4));
9103       Inst = TmpInst;
9104       return true;
9105     }
9106     return false;
9107   }
9108   return false;
9109 }
9110 
9111 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
9112   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
9113   // suffix depending on whether they're in an IT block or not.
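  // For example (a sketch of the rule): the 16-bit "adds r0, r1, r2" must
  // carry the 'S' suffix outside an IT block and must omit it inside one; the
  // checks below return the corresponding Match_Requires* code when violated.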
9114   unsigned Opc = Inst.getOpcode();
9115   const MCInstrDesc &MCID = MII.get(Opc);
9116   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
9117     assert(MCID.hasOptionalDef() &&
9118            "optionally flag setting instruction missing optional def operand");
9119     assert(MCID.NumOperands == Inst.getNumOperands() &&
9120            "operand count mismatch!");
9121     // Find the optional-def operand (cc_out).
9122     unsigned OpNo;
9123     for (OpNo = 0;
9124          OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
9125          ++OpNo)
9126       ;
9127     // If we're parsing Thumb1, reject it completely.
9128     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
9129       return Match_RequiresFlagSetting;
9130     // If we're parsing Thumb2, which form is legal depends on whether we're
9131     // in an IT block.
9132     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
9133         !inITBlock())
9134       return Match_RequiresITBlock;
9135     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
9136         inITBlock())
9137       return Match_RequiresNotITBlock;
9138     // LSL with zero immediate is not allowed in an IT block
9139     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
9140       return Match_RequiresNotITBlock;
9141   } else if (isThumbOne()) {
9142     // Some Thumb1 encodings that support high registers only allow both
9143     // registers to be from r0-r7 when in Thumb2 (or ARMv6-M).
9144     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
9145         isARMLowRegister(Inst.getOperand(1).getReg()) &&
9146         isARMLowRegister(Inst.getOperand(2).getReg()))
9147       return Match_RequiresThumb2;
9148     // Others only require ARMv6 or later.
9149     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
9150              isARMLowRegister(Inst.getOperand(0).getReg()) &&
9151              isARMLowRegister(Inst.getOperand(1).getReg()))
9152       return Match_RequiresV6;
9153   }
9154 
9155   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
9156   // than the loop below can handle, so it uses the GPRnopc register class and
9157   // we do SP handling here.
9158   if (Opc == ARM::t2MOVr && !hasV8Ops())
9159   {
9160     // SP as both source and destination is not allowed
9161     if (Inst.getOperand(0).getReg() == ARM::SP &&
9162         Inst.getOperand(1).getReg() == ARM::SP)
9163       return Match_RequiresV8;
9164     // When flag-setting, SP as either source or destination is not allowed
9165     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
9166         (Inst.getOperand(0).getReg() == ARM::SP ||
9167          Inst.getOperand(1).getReg() == ARM::SP))
9168       return Match_RequiresV8;
9169   }
9170 
9171   // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
9172   // ARMv8-A.
9173   if ((Inst.getOpcode() == ARM::VMRS || Inst.getOpcode() == ARM::VMSR) &&
9174       Inst.getOperand(0).getReg() == ARM::SP && (isThumb() && !hasV8Ops()))
9175     return Match_InvalidOperand;
9176 
9177   for (unsigned I = 0; I < MCID.NumOperands; ++I)
9178     if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
9179       // rGPRRegClass excludes PC, and also excluded SP before ARMv8
9180       if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
9181         return Match_RequiresV8;
9182       else if (Inst.getOperand(I).getReg() == ARM::PC)
9183         return Match_InvalidOperand;
9184     }
9185 
9186   return Match_Success;
9187 }
9188 
9189 namespace llvm {
9190 
9191 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
9192   return true; // In an assembly source, no need to second-guess
9193 }
9194 
9195 } // end namespace llvm
9196 
9197 // Returns true if Inst is unpredictable if it is in an IT block, but is not
9198 // the last instruction in the block.
9199 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
9200   const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9201 
9202   // All branch & call instructions terminate IT blocks with the exception of
9203   // SVC.
9204   if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
9205       MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
9206     return true;
9207 
9208   // Any arithmetic instruction which writes to the PC also terminates the IT
9209   // block.
9210   if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI))
9211     return true;
9212 
9213   return false;
9214 }
9215 
9216 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
9217                                           SmallVectorImpl<NearMissInfo> &NearMisses,
9218                                           bool MatchingInlineAsm,
9219                                           bool &EmitInITBlock,
9220                                           MCStreamer &Out) {
9221   // If we can't use an implicit IT block here, just match as normal.
9222   if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
9223     return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
9224 
9225   // Try to match the instruction in an extension of the current IT block (if
9226   // there is one).
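  // For example (illustrative): if "addeq r0, r0, r1" opened an implicit IT
  // block, a following "subeq r2, r2, #1" extends it, and a "subne" can too,
  // by inverting the block's current condition.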
9227   if (inImplicitITBlock()) {
9228     extendImplicitITBlock(ITState.Cond);
9229     if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
9230             Match_Success) {
9231       // The match succeeded, but we still have to check that the instruction is
9232       // valid in this implicit IT block.
9233       const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9234       if (MCID.isPredicable()) {
9235         ARMCC::CondCodes InstCond =
9236             (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9237                 .getImm();
9238         ARMCC::CondCodes ITCond = currentITCond();
9239         if (InstCond == ITCond) {
9240           EmitInITBlock = true;
9241           return Match_Success;
9242         } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
9243           invertCurrentITCondition();
9244           EmitInITBlock = true;
9245           return Match_Success;
9246         }
9247       }
9248     }
9249     rewindImplicitITPosition();
9250   }
9251 
9252   // Finish the current IT block, and try to match outside any IT block.
9253   flushPendingInstructions(Out);
9254   unsigned PlainMatchResult =
9255       MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
9256   if (PlainMatchResult == Match_Success) {
9257     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9258     if (MCID.isPredicable()) {
9259       ARMCC::CondCodes InstCond =
9260           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9261               .getImm();
9262       // Some forms of the branch instruction have their own condition code
9263       // fields, so can be conditionally executed without an IT block.
9264       if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
9265         EmitInITBlock = false;
9266         return Match_Success;
9267       }
9268       if (InstCond == ARMCC::AL) {
9269         EmitInITBlock = false;
9270         return Match_Success;
9271       }
9272     } else {
9273       EmitInITBlock = false;
9274       return Match_Success;
9275     }
9276   }
9277 
9278   // Try to match in a new IT block. The matcher doesn't check the actual
9279   // condition, so we create an IT block with a dummy condition, and fix it up
9280   // once we know the actual condition.
9281   startImplicitITBlock();
9282   if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
9283       Match_Success) {
9284     const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
9285     if (MCID.isPredicable()) {
9286       ITState.Cond =
9287           (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
9288               .getImm();
9289       EmitInITBlock = true;
9290       return Match_Success;
9291     }
9292   }
9293   discardImplicitITBlock();
9294 
9295   // If none of these succeed, return the error we got when trying to match
9296   // outside any IT blocks.
9297   EmitInITBlock = false;
9298   return PlainMatchResult;
9299 }
9300 
9301 static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
9302                                          unsigned VariantID = 0);
9303 
9304 static const char *getSubtargetFeatureName(uint64_t Val);
9305 bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
9306                                            OperandVector &Operands,
9307                                            MCStreamer &Out, uint64_t &ErrorInfo,
9308                                            bool MatchingInlineAsm) {
9309   MCInst Inst;
9310   unsigned MatchResult;
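  // Set by MatchInstruction when the matched instruction should be buffered
  // as part of an implicit IT block rather than emitted immediately.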
9311   bool PendConditionalInstruction = false;
9312 
9313   SmallVector<NearMissInfo, 4> NearMisses;
9314   MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
9315                                  PendConditionalInstruction, Out);
9316 
9317   switch (MatchResult) {
9318   case Match_Success:
9319     LLVM_DEBUG(dbgs() << "Parsed as: ";
9320                Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
9321                dbgs() << "\n");
9322 
9323     // Context-sensitive operand constraints aren't handled by the matcher,
9324     // so check them here.
9325     if (validateInstruction(Inst, Operands)) {
9326       // Still progress the IT block; otherwise one wrong condition causes
9327       // nasty cascading errors.
9328       forwardITPosition();
9329       return true;
9330     }
9331 
9332     { // processInstruction() updates inITBlock state, so save it away first
9333       bool wasInITBlock = inITBlock();
9334 
9335       // Some instructions need post-processing to, for example, tweak which
9336       // encoding is selected. Loop on it while changes happen so the
9337       // individual transformations can chain off each other. E.g.,
9338       // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
9339       while (processInstruction(Inst, Operands, Out))
9340         LLVM_DEBUG(dbgs() << "Changed to: ";
9341                    Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
9342                    dbgs() << "\n");
9343 
9344       // Only after the instruction is fully processed can we validate it
9345       if (wasInITBlock && hasV8Ops() && isThumb() &&
9346           !isV8EligibleForIT(&Inst)) {
9347         Warning(IDLoc, "deprecated instruction in IT block");
9348       }
9349     }
9350 
9351     // Only move forward at the very end so that everything in validate
9352     // and process gets a consistent answer about whether we're in an IT
9353     // block.
9354     forwardITPosition();
9355 
9356     // ITasm is an ARM mode pseudo-instruction that just sets the IT block
9357     // state and doesn't actually encode.
9358     if (Inst.getOpcode() == ARM::ITasm)
9359       return false;
9360 
9361     Inst.setLoc(IDLoc);
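    // Instructions matched inside an implicit IT block are buffered and only
    // flushed once the block is full or terminated, at which point the
    // covering IT instruction can be emitted with its final condition mask.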
9362     if (PendConditionalInstruction) {
9363       PendingConditionalInsts.push_back(Inst);
9364       if (isITBlockFull() || isITBlockTerminator(Inst))
9365         flushPendingInstructions(Out);
9366     } else {
9367       Out.EmitInstruction(Inst, getSTI());
9368     }
9369     return false;
9370   case Match_NearMisses:
9371     ReportNearMisses(NearMisses, IDLoc, Operands);
9372     return true;
9373   case Match_MnemonicFail: {
9374     FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
9375     std::string Suggestion = ARMMnemonicSpellCheck(
9376       ((ARMOperand &)*Operands[0]).getToken(), FBS);
9377     return Error(IDLoc, "invalid instruction" + Suggestion,
9378                  ((ARMOperand &)*Operands[0]).getLocRange());
9379   }
9380   }
9381 
9382   llvm_unreachable("Implement any new match types added!");
9383 }
9384 
9385 /// ParseDirective parses the ARM-specific directives.
9386 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
9387   const MCObjectFileInfo::Environment Format =
9388     getContext().getObjectFileInfo()->getObjectFileType();
9389   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9390   bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;
9391 
9392   StringRef IDVal = DirectiveID.getIdentifier();
9393   if (IDVal == ".word")
9394     parseLiteralValues(4, DirectiveID.getLoc());
9395   else if (IDVal == ".short" || IDVal == ".hword")
9396     parseLiteralValues(2, DirectiveID.getLoc());
9397   else if (IDVal == ".thumb")
9398     parseDirectiveThumb(DirectiveID.getLoc());
9399   else if (IDVal == ".arm")
9400     parseDirectiveARM(DirectiveID.getLoc());
9401   else if (IDVal == ".thumb_func")
9402     parseDirectiveThumbFunc(DirectiveID.getLoc());
9403   else if (IDVal == ".code")
9404     parseDirectiveCode(DirectiveID.getLoc());
9405   else if (IDVal == ".syntax")
9406     parseDirectiveSyntax(DirectiveID.getLoc());
9407   else if (IDVal == ".unreq")
9408     parseDirectiveUnreq(DirectiveID.getLoc());
9409   else if (IDVal == ".fnend")
9410     parseDirectiveFnEnd(DirectiveID.getLoc());
9411   else if (IDVal == ".cantunwind")
9412     parseDirectiveCantUnwind(DirectiveID.getLoc());
9413   else if (IDVal == ".personality")
9414     parseDirectivePersonality(DirectiveID.getLoc());
9415   else if (IDVal == ".handlerdata")
9416     parseDirectiveHandlerData(DirectiveID.getLoc());
9417   else if (IDVal == ".setfp")
9418     parseDirectiveSetFP(DirectiveID.getLoc());
9419   else if (IDVal == ".pad")
9420     parseDirectivePad(DirectiveID.getLoc());
9421   else if (IDVal == ".save")
9422     parseDirectiveRegSave(DirectiveID.getLoc(), false);
9423   else if (IDVal == ".vsave")
9424     parseDirectiveRegSave(DirectiveID.getLoc(), true);
9425   else if (IDVal == ".ltorg" || IDVal == ".pool")
9426     parseDirectiveLtorg(DirectiveID.getLoc());
9427   else if (IDVal == ".even")
9428     parseDirectiveEven(DirectiveID.getLoc());
9429   else if (IDVal == ".personalityindex")
9430     parseDirectivePersonalityIndex(DirectiveID.getLoc());
9431   else if (IDVal == ".unwind_raw")
9432     parseDirectiveUnwindRaw(DirectiveID.getLoc());
9433   else if (IDVal == ".movsp")
9434     parseDirectiveMovSP(DirectiveID.getLoc());
9435   else if (IDVal == ".arch_extension")
9436     parseDirectiveArchExtension(DirectiveID.getLoc());
9437   else if (IDVal == ".align")
9438     return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
9439   else if (IDVal == ".thumb_set")
9440     parseDirectiveThumbSet(DirectiveID.getLoc());
9441   else if (IDVal == ".inst")
9442     parseDirectiveInst(DirectiveID.getLoc());
9443   else if (IDVal == ".inst.n")
9444     parseDirectiveInst(DirectiveID.getLoc(), 'n');
9445   else if (IDVal == ".inst.w")
9446     parseDirectiveInst(DirectiveID.getLoc(), 'w');
9447   else if (!IsMachO && !IsCOFF) {
9448     if (IDVal == ".arch")
9449       parseDirectiveArch(DirectiveID.getLoc());
9450     else if (IDVal == ".cpu")
9451       parseDirectiveCPU(DirectiveID.getLoc());
9452     else if (IDVal == ".eabi_attribute")
9453       parseDirectiveEabiAttr(DirectiveID.getLoc());
9454     else if (IDVal == ".fpu")
9455       parseDirectiveFPU(DirectiveID.getLoc());
9456     else if (IDVal == ".fnstart")
9457       parseDirectiveFnStart(DirectiveID.getLoc());
9458     else if (IDVal == ".object_arch")
9459       parseDirectiveObjectArch(DirectiveID.getLoc());
9460     else if (IDVal == ".tlsdescseq")
9461       parseDirectiveTLSDescSeq(DirectiveID.getLoc());
9462     else
9463       return true;
9464   } else
9465     return true;
9466   return false;
9467 }
9468 
9469 /// parseLiteralValues
9470 ///  ::= .hword expression [, expression]*
9471 ///  ::= .short expression [, expression]*
9472 ///  ::= .word expression [, expression]*
9473 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
9474   auto parseOne = [&]() -> bool {
9475     const MCExpr *Value;
9476     if (getParser().parseExpression(Value))
9477       return true;
9478     getParser().getStreamer().EmitValue(Value, Size, L);
9479     return false;
9480   };
9481   return parseMany(parseOne);
9482 }
9483 
9484 /// parseDirectiveThumb
9485 ///  ::= .thumb
9486 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
9487   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
9488       check(!hasThumb(), L, "target does not support Thumb mode"))
9489     return true;
9490 
9491   if (!isThumb())
9492     SwitchMode();
9493 
9494   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9495   return false;
9496 }
9497 
9498 /// parseDirectiveARM
9499 ///  ::= .arm
9500 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
9501   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
9502       check(!hasARM(), L, "target does not support ARM mode"))
9503     return true;
9504 
9505   if (isThumb())
9506     SwitchMode();
9507   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9508   return false;
9509 }
9510 
9511 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) {
9512   // We need to flush the current implicit IT block on a label, because it is
9513   // not legal to branch into an IT block.
9514   flushPendingInstructions(getStreamer());
9515 }
9516 
9517 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
9518   if (NextSymbolIsThumb) {
9519     getParser().getStreamer().EmitThumbFunc(Symbol);
9520     NextSymbolIsThumb = false;
9521   }
9522 }
9523 
9524 /// parseDirectiveThumbFunc
9525 ///  ::= .thumb_func symbol_name
9526 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
9527   MCAsmParser &Parser = getParser();
9528   const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
9529   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9530 
9531   // Darwin asm has an (optional) function name after the .thumb_func
9532   // directive; ELF doesn't.
9533 
9534   if (IsMachO) {
9535     if (Parser.getTok().is(AsmToken::Identifier) ||
9536         Parser.getTok().is(AsmToken::String)) {
9537       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
9538           Parser.getTok().getIdentifier());
9539       getParser().getStreamer().EmitThumbFunc(Func);
9540       Parser.Lex();
9541       if (parseToken(AsmToken::EndOfStatement,
9542                      "unexpected token in '.thumb_func' directive"))
9543         return true;
9544       return false;
9545     }
9546   }
9547 
9548   if (parseToken(AsmToken::EndOfStatement,
9549                  "unexpected token in '.thumb_func' directive"))
9550     return true;
9551 
9552   NextSymbolIsThumb = true;
9553   return false;
9554 }
9555 
9556 /// parseDirectiveSyntax
9557 ///  ::= .syntax unified | divided
9558 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
9559   MCAsmParser &Parser = getParser();
9560   const AsmToken &Tok = Parser.getTok();
9561   if (Tok.isNot(AsmToken::Identifier)) {
9562     Error(L, "unexpected token in .syntax directive");
9563     return false;
9564   }
9565 
9566   StringRef Mode = Tok.getString();
9567   Parser.Lex();
9568   if (check(Mode == "divided" || Mode == "DIVIDED", L,
9569             "'.syntax divided' arm assembly not supported") ||
9570       check(Mode != "unified" && Mode != "UNIFIED", L,
9571             "unrecognized syntax mode in .syntax directive") ||
9572       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9573     return true;
9574 
9575   // TODO tell the MC streamer the mode
9576   // getParser().getStreamer().Emit???();
9577   return false;
9578 }
9579 
9580 /// parseDirectiveCode
9581 ///  ::= .code 16 | 32
9582 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
9583   MCAsmParser &Parser = getParser();
9584   const AsmToken &Tok = Parser.getTok();
9585   if (Tok.isNot(AsmToken::Integer))
9586     return Error(L, "unexpected token in .code directive");
9587   int64_t Val = Parser.getTok().getIntVal();
9588   if (Val != 16 && Val != 32) {
9589     Error(L, "invalid operand to .code directive");
9590     return false;
9591   }
9592   Parser.Lex();
9593 
9594   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9595     return true;
9596 
9597   if (Val == 16) {
9598     if (!hasThumb())
9599       return Error(L, "target does not support Thumb mode");
9600 
9601     if (!isThumb())
9602       SwitchMode();
9603     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9604   } else {
9605     if (!hasARM())
9606       return Error(L, "target does not support ARM mode");
9607 
9608     if (isThumb())
9609       SwitchMode();
9610     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9611   }
9612 
9613   return false;
9614 }
9615 
9616 /// parseDirectiveReq
9617 ///  ::= name .req registername
9618 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
9619   MCAsmParser &Parser = getParser();
9620   Parser.Lex(); // Eat the '.req' token.
9621   unsigned Reg;
9622   SMLoc SRegLoc, ERegLoc;
9623   if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
9624             "register name expected") ||
9625       parseToken(AsmToken::EndOfStatement,
9626                  "unexpected input in .req directive."))
9627     return true;
9628 
9629   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
9630     return Error(SRegLoc,
9631                  "redefinition of '" + Name + "' does not match original.");
9632 
9633   return false;
9634 }
9635 
9636 /// parseDirectiveUnreq
9637 ///  ::= .unreq registername
9638 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9639   MCAsmParser &Parser = getParser();
9640   if (Parser.getTok().isNot(AsmToken::Identifier))
9641     return Error(L, "unexpected input in .unreq directive.");
9642   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9643   Parser.Lex(); // Eat the identifier.
9644   if (parseToken(AsmToken::EndOfStatement,
9645                  "unexpected input in '.unreq' directive"))
9646     return true;
9647   return false;
9648 }
9649 
9650 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
9651 // before, if supported by the new target, or emit mapping symbols for the mode
9652 // switch.
9653 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
9654   if (WasThumb != isThumb()) {
9655     if (WasThumb && hasThumb()) {
9656       // Stay in Thumb mode
9657       SwitchMode();
9658     } else if (!WasThumb && hasARM()) {
9659       // Stay in ARM mode
9660       SwitchMode();
9661     } else {
9662       // Mode switch forced, because the new arch doesn't support the old mode.
9663       getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
9664                                                             : MCAF_Code32);
9665       // Warn about the implicit mode switch. GAS does not switch modes here,
9666       // but instead stays in the old mode, reporting an error on any following
9667       // instructions as the mode does not exist on the target.
9668       Warning(Loc, Twine("new target does not support ") +
9669                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
9670                        (!WasThumb ? "thumb" : "arm") + " mode");
9671     }
9672   }
9673 }
9674 
9675 /// parseDirectiveArch
9676 ///  ::= .arch token
9677 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9678   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9679   ARM::ArchKind ID = ARM::parseArch(Arch);
9680 
9681   if (ID == ARM::ArchKind::INVALID)
9682     return Error(L, "Unknown arch name");
9683 
9684   bool WasThumb = isThumb();
9685   Triple T;
9686   MCSubtargetInfo &STI = copySTI();
9687   STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
9688   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9689   FixModeAfterArchChange(WasThumb, L);
9690 
9691   getTargetStreamer().emitArch(ID);
9692   return false;
9693 }
9694 
9695 /// parseDirectiveEabiAttr
9696 ///  ::= .eabi_attribute int, int [, "str"]
9697 ///  ::= .eabi_attribute Tag_name, int [, "str"]
9698 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
9699   MCAsmParser &Parser = getParser();
9700   int64_t Tag;
9701   SMLoc TagLoc;
9702   TagLoc = Parser.getTok().getLoc();
9703   if (Parser.getTok().is(AsmToken::Identifier)) {
9704     StringRef Name = Parser.getTok().getIdentifier();
9705     Tag = ARMBuildAttrs::AttrTypeFromString(Name);
9706     if (Tag == -1) {
9707       Error(TagLoc, "attribute name not recognised: " + Name);
9708       return false;
9709     }
9710     Parser.Lex();
9711   } else {
9712     const MCExpr *AttrExpr;
9713 
9714     TagLoc = Parser.getTok().getLoc();
9715     if (Parser.parseExpression(AttrExpr))
9716       return true;
9717 
9718     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
9719     if (check(!CE, TagLoc, "expected numeric constant"))
9720       return true;
9721 
9722     Tag = CE->getValue();
9723   }
9724 
9725   if (Parser.parseToken(AsmToken::Comma, "comma expected"))
9726     return true;
9727 
9728   StringRef StringValue = "";
9729   bool IsStringValue = false;
9730 
9731   int64_t IntegerValue = 0;
9732   bool IsIntegerValue = false;
9733 
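  // Decide the value type for this tag. By ARM build-attribute convention,
  // tags below 32 take integer values (apart from the string-valued tags
  // handled explicitly below), and for tags >= 32 even tags take integers
  // while odd ones take strings; Tag_compatibility takes both.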
9734   if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
9735     IsStringValue = true;
9736   else if (Tag == ARMBuildAttrs::compatibility) {
9737     IsStringValue = true;
9738     IsIntegerValue = true;
9739   } else if (Tag < 32 || Tag % 2 == 0)
9740     IsIntegerValue = true;
9741   else if (Tag % 2 == 1)
9742     IsStringValue = true;
9743   else
9744     llvm_unreachable("invalid tag type");
9745 
9746   if (IsIntegerValue) {
9747     const MCExpr *ValueExpr;
9748     SMLoc ValueExprLoc = Parser.getTok().getLoc();
9749     if (Parser.parseExpression(ValueExpr))
9750       return true;
9751 
9752     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
9753     if (!CE)
9754       return Error(ValueExprLoc, "expected numeric constant");
9755     IntegerValue = CE->getValue();
9756   }
9757 
9758   if (Tag == ARMBuildAttrs::compatibility) {
9759     if (Parser.parseToken(AsmToken::Comma, "comma expected"))
9760       return true;
9761   }
9762 
9763   if (IsStringValue) {
9764     if (Parser.getTok().isNot(AsmToken::String))
9765       return Error(Parser.getTok().getLoc(), "bad string constant");
9766 
9767     StringValue = Parser.getTok().getStringContents();
9768     Parser.Lex();
9769   }
9770 
9771   if (Parser.parseToken(AsmToken::EndOfStatement,
9772                         "unexpected token in '.eabi_attribute' directive"))
9773     return true;
9774 
9775   if (IsIntegerValue && IsStringValue) {
9776     assert(Tag == ARMBuildAttrs::compatibility);
9777     getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
9778   } else if (IsIntegerValue)
9779     getTargetStreamer().emitAttribute(Tag, IntegerValue);
9780   else if (IsStringValue)
9781     getTargetStreamer().emitTextAttribute(Tag, StringValue);
9782   return false;
9783 }
9784 
9785 /// parseDirectiveCPU
9786 ///  ::= .cpu str
9787 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9788   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9789   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9790 
9791   // FIXME: This is using table-gen data, but should be moved to
9792   // ARMTargetParser once that is table-gen'd.
9793   if (!getSTI().isCPUStringValid(CPU))
9794     return Error(L, "Unknown CPU name");
9795 
9796   bool WasThumb = isThumb();
9797   MCSubtargetInfo &STI = copySTI();
9798   STI.setDefaultFeatures(CPU, "");
9799   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9800   FixModeAfterArchChange(WasThumb, L);
9801 
9802   return false;
9803 }
9804 
9805 /// parseDirectiveFPU
9806 ///  ::= .fpu str
9807 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9808   SMLoc FPUNameLoc = getTok().getLoc();
9809   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9810 
9811   unsigned ID = ARM::parseFPU(FPU);
9812   std::vector<StringRef> Features;
9813   if (!ARM::getFPUFeatures(ID, Features))
9814     return Error(FPUNameLoc, "Unknown FPU name");
9815 
9816   MCSubtargetInfo &STI = copySTI();
9817   for (auto Feature : Features)
9818     STI.ApplyFeatureFlag(Feature);
9819   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9820 
9821   getTargetStreamer().emitFPU(ID);
9822   return false;
9823 }
9824 
9825 /// parseDirectiveFnStart
9826 ///  ::= .fnstart
9827 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9828   if (parseToken(AsmToken::EndOfStatement,
9829                  "unexpected token in '.fnstart' directive"))
9830     return true;
9831 
9832   if (UC.hasFnStart()) {
9833     Error(L, ".fnstart starts before the end of the previous one");
9834     UC.emitFnStartLocNotes();
9835     return true;
9836   }
9837 
9838   // Reset the unwind directives parser state
9839   UC.reset();
9840 
9841   getTargetStreamer().emitFnStart();
9842 
9843   UC.recordFnStart(L);
9844   return false;
9845 }
9846 
9847 /// parseDirectiveFnEnd
9848 ///  ::= .fnend
9849 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9850   if (parseToken(AsmToken::EndOfStatement,
9851                  "unexpected token in '.fnend' directive"))
9852     return true;
9853   // Check the ordering of unwind directives
9854   if (!UC.hasFnStart())
9855     return Error(L, ".fnstart must precede .fnend directive");
9856 
9857   // Reset the unwind directives parser state
9858   getTargetStreamer().emitFnEnd();
9859 
9860   UC.reset();
9861   return false;
9862 }
9863 
9864 /// parseDirectiveCantUnwind
9865 ///  ::= .cantunwind
9866 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
9867   if (parseToken(AsmToken::EndOfStatement,
9868                  "unexpected token in '.cantunwind' directive"))
9869     return true;
9870 
9871   UC.recordCantUnwind(L);
9872   // Check the ordering of unwind directives
9873   if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
9874     return true;
9875 
9876   if (UC.hasHandlerData()) {
9877     Error(L, ".cantunwind can't be used with .handlerdata directive");
9878     UC.emitHandlerDataLocNotes();
9879     return true;
9880   }
9881   if (UC.hasPersonality()) {
9882     Error(L, ".cantunwind can't be used with .personality directive");
9883     UC.emitPersonalityLocNotes();
9884     return true;
9885   }
9886 
9887   getTargetStreamer().emitCantUnwind();
9888   return false;
9889 }
9890 
9891 /// parseDirectivePersonality
9892 ///  ::= .personality name
9893 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
9894   MCAsmParser &Parser = getParser();
9895   bool HasExistingPersonality = UC.hasPersonality();
9896 
9897   // Parse the name of the personality routine
9898   if (Parser.getTok().isNot(AsmToken::Identifier))
9899     return Error(L, "unexpected input in .personality directive.");
9900   StringRef Name(Parser.getTok().getIdentifier());
9901   Parser.Lex();
9902 
9903   if (parseToken(AsmToken::EndOfStatement,
9904                  "unexpected token in '.personality' directive"))
9905     return true;
9906 
9907   UC.recordPersonality(L);
9908 
9909   // Check the ordering of unwind directives
9910   if (!UC.hasFnStart())
9911     return Error(L, ".fnstart must precede .personality directive");
9912   if (UC.cantUnwind()) {
9913     Error(L, ".personality can't be used with .cantunwind directive");
9914     UC.emitCantUnwindLocNotes();
9915     return true;
9916   }
9917   if (UC.hasHandlerData()) {
9918     Error(L, ".personality must precede .handlerdata directive");
9919     UC.emitHandlerDataLocNotes();
9920     return true;
9921   }
9922   if (HasExistingPersonality) {
9923     Error(L, "multiple personality directives");
9924     UC.emitPersonalityLocNotes();
9925     return true;
9926   }
9927 
9928   MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name);
9929   getTargetStreamer().emitPersonality(PR);
9930   return false;
9931 }
9932 
9933 /// parseDirectiveHandlerData
9934 ///  ::= .handlerdata
9935 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9936   if (parseToken(AsmToken::EndOfStatement,
9937                  "unexpected token in '.handlerdata' directive"))
9938     return true;
9939 
9940   UC.recordHandlerData(L);
9941   // Check the ordering of unwind directives
9942   if (!UC.hasFnStart())
9943     return Error(L, ".fnstart must precede .handlerdata directive");
9944   if (UC.cantUnwind()) {
9945     Error(L, ".handlerdata can't be used with .cantunwind directive");
9946     UC.emitCantUnwindLocNotes();
9947     return true;
9948   }
9949 
9950   getTargetStreamer().emitHandlerData();
9951   return false;
9952 }
9953 
9954 /// parseDirectiveSetFP
9955 ///  ::= .setfp fpreg, spreg [, offset]
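/// For example, '.setfp fp, sp, #16' records (for unwind table emission) that
/// the frame pointer was established as fp = sp + 16.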
9956 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
9957   MCAsmParser &Parser = getParser();
9958   // Check the ordering of unwind directives
9959   if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
9960       check(UC.hasHandlerData(), L,
9961             ".setfp must precede .handlerdata directive"))
9962     return true;
9963 
9964   // Parse fpreg
9965   SMLoc FPRegLoc = Parser.getTok().getLoc();
9966   int FPReg = tryParseRegister();
9967 
9968   if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
9969       Parser.parseToken(AsmToken::Comma, "comma expected"))
9970     return true;
9971 
9972   // Parse spreg
9973   SMLoc SPRegLoc = Parser.getTok().getLoc();
9974   int SPReg = tryParseRegister();
9975   if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
9976       check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
9977             "register should be either $sp or the latest fp register"))
9978     return true;
9979 
9980   // Update the frame pointer register
9981   UC.saveFPReg(FPReg);
9982 
9983   // Parse offset
9984   int64_t Offset = 0;
9985   if (Parser.parseOptionalToken(AsmToken::Comma)) {
9986     if (Parser.getTok().isNot(AsmToken::Hash) &&
9987         Parser.getTok().isNot(AsmToken::Dollar))
9988       return Error(Parser.getTok().getLoc(), "'#' expected");
9989     Parser.Lex(); // skip hash token.
9990 
9991     const MCExpr *OffsetExpr;
9992     SMLoc ExLoc = Parser.getTok().getLoc();
9993     SMLoc EndLoc;
9994     if (getParser().parseExpression(OffsetExpr, EndLoc))
9995       return Error(ExLoc, "malformed setfp offset");
9996     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9997     if (check(!CE, ExLoc, "setfp offset must be an immediate"))
9998       return true;
9999     Offset = CE->getValue();
10000   }
10001 
10002   if (Parser.parseToken(AsmToken::EndOfStatement))
10003     return true;
10004 
10005   getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
10006                                 static_cast<unsigned>(SPReg), Offset);
10007   return false;
10008 }
10009 
10010 /// parseDirectivePad
10011 ///  ::= .pad offset
10012 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
10013   MCAsmParser &Parser = getParser();
10014   // Check the ordering of unwind directives
10015   if (!UC.hasFnStart())
10016     return Error(L, ".fnstart must precede .pad directive");
10017   if (UC.hasHandlerData())
10018     return Error(L, ".pad must precede .handlerdata directive");
10019 
10020   // Parse the offset
10021   if (Parser.getTok().isNot(AsmToken::Hash) &&
10022       Parser.getTok().isNot(AsmToken::Dollar))
10023     return Error(Parser.getTok().getLoc(), "'#' expected");
10024   Parser.Lex(); // skip hash token.
10025 
10026   const MCExpr *OffsetExpr;
10027   SMLoc ExLoc = Parser.getTok().getLoc();
10028   SMLoc EndLoc;
10029   if (getParser().parseExpression(OffsetExpr, EndLoc))
10030     return Error(ExLoc, "malformed pad offset");
10031   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10032   if (!CE)
10033     return Error(ExLoc, "pad offset must be an immediate");
10034 
10035   if (parseToken(AsmToken::EndOfStatement,
10036                  "unexpected token in '.pad' directive"))
10037     return true;
10038 
10039   getTargetStreamer().emitPad(CE->getValue());
10040   return false;
10041 }
10042 
10043 /// parseDirectiveRegSave
10044 ///  ::= .save  { registers }
10045 ///  ::= .vsave { registers }
10046 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
10047   // Check the ordering of unwind directives
10048   if (!UC.hasFnStart())
10049     return Error(L, ".fnstart must precede .save or .vsave directives");
10050   if (UC.hasHandlerData())
10051     return Error(L, ".save or .vsave must precede .handlerdata directive");
10052 
10053   // RAII object to make sure parsed operands are deleted.
10054   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
10055 
10056   // Parse the register list
10057   if (parseRegisterList(Operands) ||
10058       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10059     return true;
10060   ARMOperand &Op = (ARMOperand &)*Operands[0];
10061   if (!IsVector && !Op.isRegList())
10062     return Error(L, ".save expects GPR registers");
10063   if (IsVector && !Op.isDPRRegList())
10064     return Error(L, ".vsave expects DPR registers");
10065 
10066   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
10067   return false;
10068 }
10069 
10070 /// parseDirectiveInst
10071 ///  ::= .inst opcode [, ...]
10072 ///  ::= .inst.n opcode [, ...]
10073 ///  ::= .inst.w opcode [, ...]
10074 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
10075   int Width = 4;
10076 
10077   if (isThumb()) {
10078     switch (Suffix) {
10079     case 'n':
10080       Width = 2;
10081       break;
10082     case 'w':
10083       break;
10084     default:
10085       Width = 0;
10086       break;
10087     }
10088   } else {
10089     if (Suffix)
10090       return Error(Loc, "width suffixes are invalid in ARM mode");
10091   }
10092 
10093   auto parseOne = [&]() -> bool {
10094     const MCExpr *Expr;
10095     if (getParser().parseExpression(Expr))
10096       return true;
10097     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
10098     if (!Value) {
10099       return Error(Loc, "expected constant expression");
10100     }
10101 
10102     char CurSuffix = Suffix;
10103     switch (Width) {
10104     case 2:
10105       if (Value->getValue() > 0xffff)
10106         return Error(Loc, "inst.n operand is too big, use inst.w instead");
10107       break;
10108     case 4:
10109       if (Value->getValue() > 0xffffffff)
10110         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
10111                               " operand is too big");
10112       break;
10113     case 0:
10114       // Thumb mode, no width indicated. Guess from the opcode, if possible.
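      // (32-bit Thumb encodings have a first halfword of 0xe800 or above, so
      // a value below 0xe800 can only be a 16-bit instruction, and a 32-bit
      // value whose top halfword is in that range must be a wide encoding.)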
10115       if (Value->getValue() < 0xe800)
10116         CurSuffix = 'n';
10117       else if (Value->getValue() >= 0xe8000000)
10118         CurSuffix = 'w';
10119       else
10120         return Error(Loc, "cannot determine Thumb instruction size, "
10121                           "use inst.n/inst.w instead");
10122       break;
10123     default:
10124       llvm_unreachable("only supported widths are 2 and 4");
10125     }
10126 
10127     getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
10128     return false;
10129   };
10130 
10131   if (parseOptionalToken(AsmToken::EndOfStatement))
10132     return Error(Loc, "expected expression following directive");
10133   if (parseMany(parseOne))
10134     return true;
10135   return false;
10136 }
10137 
10138 /// parseDirectiveLtorg
10139 ///  ::= .ltorg | .pool
10140 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
10141   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10142     return true;
10143   getTargetStreamer().emitCurrentConstantPool();
10144   return false;
10145 }
10146 
10147 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
10148   const MCSection *Section = getStreamer().getCurrentSectionOnly();
10149 
10150   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
10151     return true;
10152 
10153   if (!Section) {
10154     getStreamer().InitSections(false);
10155     Section = getStreamer().getCurrentSectionOnly();
10156   }
10157 
10158   assert(Section && "must have section to emit alignment");
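  // Code sections get padded with no-ops, other sections with zero bytes.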
10159   if (Section->UseCodeAlign())
10160     getStreamer().EmitCodeAlignment(2);
10161   else
10162     getStreamer().EmitValueToAlignment(2);
10163 
10164   return false;
10165 }
10166 
10167 /// parseDirectivePersonalityIndex
10168 ///   ::= .personalityindex index
10169 bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
10170   MCAsmParser &Parser = getParser();
10171   bool HasExistingPersonality = UC.hasPersonality();
10172 
10173   const MCExpr *IndexExpression;
10174   SMLoc IndexLoc = Parser.getTok().getLoc();
10175   if (Parser.parseExpression(IndexExpression) ||
10176       parseToken(AsmToken::EndOfStatement,
10177                  "unexpected token in '.personalityindex' directive")) {
10178     return true;
10179   }
10180 
10181   UC.recordPersonalityIndex(L);
10182 
10183   if (!UC.hasFnStart()) {
10184     return Error(L, ".fnstart must precede .personalityindex directive");
10185   }
10186   if (UC.cantUnwind()) {
10187     Error(L, ".personalityindex cannot be used with .cantunwind");
10188     UC.emitCantUnwindLocNotes();
10189     return true;
10190   }
10191   if (UC.hasHandlerData()) {
10192     Error(L, ".personalityindex must precede .handlerdata directive");
10193     UC.emitHandlerDataLocNotes();
10194     return true;
10195   }
10196   if (HasExistingPersonality) {
10197     Error(L, "multiple personality directives");
10198     UC.emitPersonalityLocNotes();
10199     return true;
10200   }
10201 
10202   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
10203   if (!CE)
10204     return Error(IndexLoc, "index must be a constant number");
10205   if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
10206     return Error(IndexLoc,
10207                  "personality routine index should be in range [0-2]");
10208 
10209   getTargetStreamer().emitPersonalityIndex(CE->getValue());
10210   return false;
10211 }
10212 
10213 /// parseDirectiveUnwindRaw
10214 ///   ::= .unwind_raw offset, opcode [, opcode...]
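///
/// The offset is the stack adjustment to record, and each opcode is a raw
/// unwind opcode byte in the range 0-255.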
10215 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
10216   MCAsmParser &Parser = getParser();
10217   int64_t StackOffset;
10218   const MCExpr *OffsetExpr;
10219   SMLoc OffsetLoc = getLexer().getLoc();
10220 
10221   if (!UC.hasFnStart())
10222     return Error(L, ".fnstart must precede .unwind_raw directives");
10223   if (getParser().parseExpression(OffsetExpr))
10224     return Error(OffsetLoc, "expected expression");
10225 
10226   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10227   if (!CE)
10228     return Error(OffsetLoc, "offset must be a constant");
10229 
10230   StackOffset = CE->getValue();
10231 
10232   if (Parser.parseToken(AsmToken::Comma, "expected comma"))
10233     return true;
10234 
10235   SmallVector<uint8_t, 16> Opcodes;
10236 
10237   auto parseOne = [&]() -> bool {
10238     const MCExpr *OE;
10239     SMLoc OpcodeLoc = getLexer().getLoc();
10240     if (check(getLexer().is(AsmToken::EndOfStatement) ||
10241                   Parser.parseExpression(OE),
10242               OpcodeLoc, "expected opcode expression"))
10243       return true;
10244     const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
10245     if (!OC)
10246       return Error(OpcodeLoc, "opcode value must be a constant");
10247     const int64_t Opcode = OC->getValue();
10248     if (Opcode & ~0xff)
10249       return Error(OpcodeLoc, "invalid opcode");
10250     Opcodes.push_back(uint8_t(Opcode));
10251     return false;
10252   };
10253 
10254   // Must have at least 1 element
10255   SMLoc OpcodeLoc = getLexer().getLoc();
10256   if (parseOptionalToken(AsmToken::EndOfStatement))
10257     return Error(OpcodeLoc, "expected opcode expression");
10258   if (parseMany(parseOne))
10259     return true;
10260 
10261   getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
10262   return false;
10263 }
10264 
10265 /// parseDirectiveTLSDescSeq
10266 ///   ::= .tlsdescseq tls-variable
10267 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
10268   MCAsmParser &Parser = getParser();
10269 
10270   if (getLexer().isNot(AsmToken::Identifier))
10271     return TokError("expected variable after '.tlsdescseq' directive");
10272 
10273   const MCSymbolRefExpr *SRE =
10274     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
10275                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10276   Lex();
10277 
10278   if (parseToken(AsmToken::EndOfStatement,
10279                  "unexpected token in '.tlsdescseq' directive"))
10280     return true;
10281 
10282   getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10283   return false;
10284 }
10285 
10286 /// parseDirectiveMovSP
10287 ///  ::= .movsp reg [, #offset]
10288 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10289   MCAsmParser &Parser = getParser();
10290   if (!UC.hasFnStart())
10291     return Error(L, ".fnstart must precede .movsp directives");
10292   if (UC.getFPReg() != ARM::SP)
10293     return Error(L, "unexpected .movsp directive");
10294 
10295   SMLoc SPRegLoc = Parser.getTok().getLoc();
10296   int SPReg = tryParseRegister();
10297   if (SPReg == -1)
10298     return Error(SPRegLoc, "register expected");
10299   if (SPReg == ARM::SP || SPReg == ARM::PC)
10300     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10301 
10302   int64_t Offset = 0;
10303   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10304     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
10305       return true;
10306 
10307     const MCExpr *OffsetExpr;
10308     SMLoc OffsetLoc = Parser.getTok().getLoc();
10309 
10310     if (Parser.parseExpression(OffsetExpr))
10311       return Error(OffsetLoc, "malformed offset expression");
10312 
10313     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10314     if (!CE)
10315       return Error(OffsetLoc, "offset must be an immediate constant");
10316 
10317     Offset = CE->getValue();
10318   }
10319 
10320   if (parseToken(AsmToken::EndOfStatement,
10321                  "unexpected token in '.movsp' directive"))
10322     return true;
10323 
10324   getTargetStreamer().emitMovSP(SPReg, Offset);
10325   UC.saveFPReg(SPReg);
10326 
10327   return false;
10328 }
10329 
10330 /// parseDirectiveObjectArch
10331 ///   ::= .object_arch name
10332 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10333   MCAsmParser &Parser = getParser();
10334   if (getLexer().isNot(AsmToken::Identifier))
10335     return Error(getLexer().getLoc(), "unexpected token");
10336 
10337   StringRef Arch = Parser.getTok().getString();
10338   SMLoc ArchLoc = Parser.getTok().getLoc();
10339   Lex();
10340 
10341   ARM::ArchKind ID = ARM::parseArch(Arch);
10342 
10343   if (ID == ARM::ArchKind::INVALID)
10344     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
10345   if (parseToken(AsmToken::EndOfStatement))
10346     return true;
10347 
10348   getTargetStreamer().emitObjectArch(ID);
10349   return false;
10350 }
10351 
10352 /// parseDirectiveAlign
10353 ///   ::= .align
10354 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
10355   // NOTE: if this is not the end of the statement, fall back to the target-
10356   // agnostic handling for this directive, which will handle it correctly.
10357   if (parseOptionalToken(AsmToken::EndOfStatement)) {
10358     // '.align' is handled target-specifically to mean 2**2 byte alignment.
10359     const MCSection *Section = getStreamer().getCurrentSectionOnly();
10360     assert(Section && "must have section to emit alignment");
10361     if (Section->UseCodeAlign())
10362       getStreamer().EmitCodeAlignment(4, 0);
10363     else
10364       getStreamer().EmitValueToAlignment(4, 0, 1, 0);
10365     return false;
10366   }
10367   return true;
10368 }
10369 
10370 /// parseDirectiveThumbSet
10371 ///  ::= .thumb_set name, value
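///
/// Behaves like '.set', additionally telling the target streamer to treat the
/// resulting symbol as a Thumb function.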
10372 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
10373   MCAsmParser &Parser = getParser();
10374 
10375   StringRef Name;
10376   if (check(Parser.parseIdentifier(Name),
10377             "expected identifier after '.thumb_set'") ||
10378       parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
10379     return true;
10380 
10381   MCSymbol *Sym;
10382   const MCExpr *Value;
10383   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
10384                                                Parser, Sym, Value))
10385     return true;
10386 
10387   getTargetStreamer().emitThumbSet(Sym, Value);
10388   return false;
10389 }
10390 
10391 /// Force static initialization.
10392 extern "C" void LLVMInitializeARMAsmParser() {
10393   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
10394   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
10395   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
10396   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
10397 }
10398 
10399 #define GET_REGISTER_MATCHER
10400 #define GET_SUBTARGET_FEATURE_NAME
10401 #define GET_MATCHER_IMPLEMENTATION
10402 #define GET_MNEMONIC_SPELL_CHECKER
10403 #include "ARMGenAsmMatcher.inc"
10404 
10405 // Some diagnostics need to vary with subtarget features, so they are handled
10406 // here. For example, the DPR class has either 16 or 32 registers, depending
10407 // on the FPU available.
10408 const char *
10409 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
10410   switch (MatchError) {
10411   // rGPR contains sp starting with ARMv8.
10412   case Match_rGPR:
10413     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
10414                       : "operand must be a register in range [r0, r12] or r14";
10415   // DPR contains 16 registers for some FPUs, and 32 for others.
10416   case Match_DPR:
10417     return hasD16() ? "operand must be a register in range [d0, d15]"
10418                     : "operand must be a register in range [d0, d31]";
10419   case Match_DPR_RegList:
10420     return hasD16() ? "operand must be a list of registers in range [d0, d15]"
10421                     : "operand must be a list of registers in range [d0, d31]";
10422 
10423   // For all other diags, use the static string from tablegen.
10424   default:
10425     return getMatchKindDiag(MatchError);
10426   }
10427 }
10428 
10429 // Process the list of near-misses, throwing away ones we don't want to report
10430 // to the user, and converting the rest to a source location and string that
10431 // should be reported.
10432 void
10433 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
10434                                SmallVectorImpl<NearMissMessage> &NearMissesOut,
10435                                SMLoc IDLoc, OperandVector &Operands) {
10436   // TODO: If operand didn't match, sub in a dummy one and run target
10437   // predicate, so that we can avoid reporting near-misses that are invalid?
10438   // TODO: Many operand types don't have SuperClasses set, so we report
10439   // redundant ones.
10440   // TODO: Some operands are superclasses of registers (e.g.
10441   // MCK_RegShiftedImm), we don't have any way to represent that currently.
10442   // TODO: This is not all ARM-specific, can some of it be factored out?
10443 
10444   // Record some information about near-misses that we have already seen, so
10445   // that we can avoid reporting redundant ones. For example, if there are
10446   // variants of an instruction that take 8- and 16-bit immediates, we want
10447   // to only report the widest one.
10448   std::multimap<unsigned, unsigned> OperandMissesSeen;
10449   SmallSet<FeatureBitset, 4> FeatureMissesSeen;
10450   bool ReportedTooFewOperands = false;
10451 
10452   // Process the near-misses in reverse order, so that we see more general ones
10453   // first, and so can avoid emitting more specific ones.
10454   for (NearMissInfo &I : reverse(NearMissesIn)) {
10455     switch (I.getKind()) {
10456     case NearMissInfo::NearMissOperand: {
10457       SMLoc OperandLoc =
10458           ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
10459       const char *OperandDiag =
10460           getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());
10461 
10462       // If we have already emitted a message for a superclass, don't also report
10463       // the sub-class. We consider all operand classes that we don't have a
10464       // specialised diagnostic for to be equal for the purpose of this check,
10465       // so that we don't report the generic error multiple times on the same
10466       // operand.
10467       unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
10468       auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
10469       if (std::any_of(PrevReports.first, PrevReports.second,
10470                       [DupCheckMatchClass](
10471                           const std::pair<unsigned, unsigned> Pair) {
10472             if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
10473               return Pair.second == DupCheckMatchClass;
10474             else
10475               return isSubclass((MatchClassKind)DupCheckMatchClass,
10476                                 (MatchClassKind)Pair.second);
10477           }))
10478         break;
10479       OperandMissesSeen.insert(
10480           std::make_pair(I.getOperandIndex(), DupCheckMatchClass));
10481 
10482       NearMissMessage Message;
10483       Message.Loc = OperandLoc;
10484       if (OperandDiag) {
10485         Message.Message = OperandDiag;
10486       } else if (I.getOperandClass() == InvalidMatchClass) {
10487         Message.Message = "too many operands for instruction";
10488       } else {
10489         Message.Message = "invalid operand for instruction";
10490         LLVM_DEBUG(
10491             dbgs() << "Missing diagnostic string for operand class "
10492                    << getMatchClassName((MatchClassKind)I.getOperandClass())
10493                    << I.getOperandClass() << ", error " << I.getOperandError()
10494                    << ", opcode " << MII.getName(I.getOpcode()) << "\n");
10495       }
10496       NearMissesOut.emplace_back(Message);
10497       break;
10498     }
10499     case NearMissInfo::NearMissFeature: {
10500       const FeatureBitset &MissingFeatures = I.getFeatures();
10501       // Don't report the same set of features twice.
10502       if (FeatureMissesSeen.count(MissingFeatures))
10503         break;
10504       FeatureMissesSeen.insert(MissingFeatures);
10505 
10506       // Special case: don't report a feature set which includes arm-mode for
10507       // targets that don't have ARM mode.
10508       if (MissingFeatures.test(Feature_IsARMBit) && !hasARM())
10509         break;
10510       // Don't report any near-misses that both require switching instruction
10511       // set, and adding other subtarget features.
10512       if (isThumb() && MissingFeatures.test(Feature_IsARMBit) &&
10513           MissingFeatures.count() > 1)
10514         break;
10515       if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) &&
10516           MissingFeatures.count() > 1)
10517         break;
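      // Likewise, ignore near-misses that would require Thumb2 if they also
      // need features beyond the Thumb/Thumb2 mode bits themselves.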
10518       if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) &&
10519           (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit,
10520                                              Feature_IsThumbBit})).any())
10521         break;
10522       if (isMClass() && MissingFeatures.test(Feature_HasNEONBit))
10523         break;
10524 
10525       NearMissMessage Message;
10526       Message.Loc = IDLoc;
10527       raw_svector_ostream OS(Message.Message);
10528 
10529       OS << "instruction requires:";
10530       for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i)
10531         if (MissingFeatures.test(i))
10532           OS << ' ' << getSubtargetFeatureName(i);
10533 
10534       NearMissesOut.emplace_back(Message);
10535 
10536       break;
10537     }
10538     case NearMissInfo::NearMissPredicate: {
10539       NearMissMessage Message;
10540       Message.Loc = IDLoc;
10541       switch (I.getPredicateError()) {
10542       case Match_RequiresNotITBlock:
10543         Message.Message = "flag setting instruction only valid outside IT block";
10544         break;
10545       case Match_RequiresITBlock:
10546         Message.Message = "instruction only valid inside IT block";
10547         break;
10548       case Match_RequiresV6:
10549         Message.Message = "instruction variant requires ARMv6 or later";
10550         break;
10551       case Match_RequiresThumb2:
10552         Message.Message = "instruction variant requires Thumb2";
10553         break;
10554       case Match_RequiresV8:
10555         Message.Message = "instruction variant requires ARMv8 or later";
10556         break;
10557       case Match_RequiresFlagSetting:
10558         Message.Message = "no flag-preserving variant of this instruction available";
10559         break;
10560       case Match_InvalidOperand:
10561         Message.Message = "invalid operand for instruction";
10562         break;
10563       default:
10564         llvm_unreachable("Unhandled target predicate error");
10565         break;
10566       }
10567       NearMissesOut.emplace_back(Message);
10568       break;
10569     }
10570     case NearMissInfo::NearMissTooFewOperands: {
10571       if (!ReportedTooFewOperands) {
10572         SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
10573         NearMissesOut.emplace_back(NearMissMessage{
10574             EndLoc, StringRef("too few operands for instruction")});
10575         ReportedTooFewOperands = true;
10576       }
10577       break;
10578     }
10579     case NearMissInfo::NoNearMiss:
10580       // This should never leave the matcher.
10581       llvm_unreachable("not a near-miss");
10582       break;
10583     }
10584   }
10585 }
10586 
10587 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
10588                                     SMLoc IDLoc, OperandVector &Operands) {
10589   SmallVector<NearMissMessage, 4> Messages;
10590   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
10591 
10592   if (Messages.size() == 0) {
10593     // No near-misses were found, so the best we can do is "invalid
10594     // instruction".
10595     Error(IDLoc, "invalid instruction");
10596   } else if (Messages.size() == 1) {
10597     // One near miss was found, report it as the sole error.
10598     Error(Messages[0].Loc, Messages[0].Message);
10599   } else {
10600     // More than one near miss, so report a generic "invalid instruction"
10601     // error, followed by notes for each of the near-misses.
10602     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
10603     for (auto &M : Messages) {
10604       Note(M.Loc, M.Message);
10605     }
10606   }
10607 }
10608 
10609 /// parseDirectiveArchExtension
10610 ///   ::= .arch_extension [no]feature
10611 bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
10612   // FIXME: This structure should be moved inside ARMTargetParser
10613   // when we start to table-generate them, so that we can use the ARM
10614   // feature flags below, which were generated by table-gen.
10615   static const struct {
10616     const unsigned Kind;
10617     const FeatureBitset ArchCheck;
10618     const FeatureBitset Features;
10619   } Extensions[] = {
10620     { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
10621     { ARM::AEK_CRYPTO,  {Feature_HasV8Bit},
10622       {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
10623     { ARM::AEK_FP, {Feature_HasV8Bit}, {ARM::FeatureFPARMv8} },
10624     { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
10625       {Feature_HasV7Bit, Feature_IsNotMClassBit},
10626       {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
10627     { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
10628       {ARM::FeatureMP} },
10629     { ARM::AEK_SIMD, {Feature_HasV8Bit},
10630       {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
10631     { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
10632     // FIXME: Only available in A-class, isel not predicated
10633     { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
10634     { ARM::AEK_FP16, {Feature_HasV8_2aBit},
10635       {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
10636     { ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
10637     // FIXME: Unsupported extensions.
10638     { ARM::AEK_OS, {}, {} },
10639     { ARM::AEK_IWMMXT, {}, {} },
10640     { ARM::AEK_IWMMXT2, {}, {} },
10641     { ARM::AEK_MAVERICK, {}, {} },
10642     { ARM::AEK_XSCALE, {}, {} },
10643   };
10644 
10645   MCAsmParser &Parser = getParser();
10646 
10647   if (getLexer().isNot(AsmToken::Identifier))
10648     return Error(getLexer().getLoc(), "expected architecture extension name");
10649 
10650   StringRef Name = Parser.getTok().getString();
10651   SMLoc ExtLoc = Parser.getTok().getLoc();
10652   Lex();
10653 
10654   if (parseToken(AsmToken::EndOfStatement,
10655                  "unexpected token in '.arch_extension' directive"))
10656     return true;
10657 
10658   bool EnableFeature = true;
10659   if (Name.startswith_lower("no")) {
10660     EnableFeature = false;
10661     Name = Name.substr(2);
10662   }
10663   unsigned FeatureKind = ARM::parseArchExt(Name);
10664   if (FeatureKind == ARM::AEK_INVALID)
10665     return Error(ExtLoc, "unknown architectural extension: " + Name);
10666 
10667   for (const auto &Extension : Extensions) {
10668     if (Extension.Kind != FeatureKind)
10669       continue;
10670 
10671     if (Extension.Features.none())
10672       return Error(ExtLoc, "unsupported architectural extension: " + Name);
10673 
10674     if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
10675       return Error(ExtLoc, "architectural extension '" + Name +
10676                                "' is not "
10677                                "allowed for the current base architecture");
10678 
10679     MCSubtargetInfo &STI = copySTI();
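    // Toggle only the bits that actually need to change, so that
    // ToggleFeature moves them to the requested state: bits currently clear
    // when enabling the extension, or currently set when disabling it.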
10680     FeatureBitset ToggleFeatures = EnableFeature
10681       ? (~STI.getFeatureBits() & Extension.Features)
10682       : ( STI.getFeatureBits() & Extension.Features);
10683 
10684     FeatureBitset Features =
10685         ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
10686     setAvailableFeatures(Features);
10687     return false;
10688   }
10689 
10690   return Error(ExtLoc, "unknown architectural extension: " + Name);
10691 }
10692 
10693 // Define this matcher function after the auto-generated include so we
10694 // have the match class enum definitions.
10695 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
10696                                                   unsigned Kind) {
10697   ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
10698   // If the kind is a token for a literal immediate, check if our asm
10699   // operand matches. This is for InstAliases which have a fixed-value
10700   // immediate in the syntax.
10701   switch (Kind) {
10702   default: break;
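  // MCK__35_0 is the matcher class generated for the literal token "#0" ('#'
  // is mangled to its ASCII value, 35).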
10703   case MCK__35_0:
10704     if (Op.isImm())
10705       if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
10706         if (CE->getValue() == 0)
10707           return Match_Success;
10708     break;
10709   case MCK_ModImm:
10710     if (Op.isImm()) {
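      // An expression that can't be evaluated to a constant yet (e.g. one
      // involving symbols) is accepted here and left for later stages to
      // validate.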
10711       const MCExpr *SOExpr = Op.getImm();
10712       int64_t Value;
10713       if (!SOExpr->evaluateAsAbsolute(Value))
10714         return Match_Success;
10715       assert((Value >= std::numeric_limits<int32_t>::min() &&
10716               Value <= std::numeric_limits<uint32_t>::max()) &&
10717              "expression value must be representable in 32 bits");
10718     }
10719     break;
10720   case MCK_rGPR:
10721     if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
10722       return Match_Success;
10723     return Match_rGPR;
10724   case MCK_GPRPair:
10725     if (Op.isReg() &&
10726         MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
10727       return Match_Success;
10728     break;
10729   }
10730   return Match_InvalidOperand;
10731 }
10732