1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "ARMFeatures.h"
11 #include "Utils/ARMBaseInfo.h"
12 #include "MCTargetDesc/ARMAddressingModes.h"
13 #include "MCTargetDesc/ARMBaseInfo.h"
14 #include "MCTargetDesc/ARMMCExpr.h"
15 #include "MCTargetDesc/ARMMCTargetDesc.h"
16 #include "llvm/ADT/APFloat.h"
17 #include "llvm/ADT/APInt.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringMap.h"
23 #include "llvm/ADT/StringRef.h"
24 #include "llvm/ADT/StringSwitch.h"
25 #include "llvm/ADT/Triple.h"
26 #include "llvm/ADT/Twine.h"
27 #include "llvm/MC/MCContext.h"
28 #include "llvm/MC/MCExpr.h"
29 #include "llvm/MC/MCInst.h"
30 #include "llvm/MC/MCInstrDesc.h"
31 #include "llvm/MC/MCInstrInfo.h"
32 #include "llvm/MC/MCObjectFileInfo.h"
33 #include "llvm/MC/MCParser/MCAsmLexer.h"
34 #include "llvm/MC/MCParser/MCAsmParser.h"
35 #include "llvm/MC/MCParser/MCAsmParserExtension.h"
36 #include "llvm/MC/MCParser/MCAsmParserUtils.h"
37 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
38 #include "llvm/MC/MCParser/MCTargetAsmParser.h"
39 #include "llvm/MC/MCRegisterInfo.h"
40 #include "llvm/MC/MCSection.h"
41 #include "llvm/MC/MCStreamer.h"
42 #include "llvm/MC/MCSubtargetInfo.h"
43 #include "llvm/MC/MCSymbol.h"
44 #include "llvm/MC/SubtargetFeature.h"
45 #include "llvm/Support/ARMBuildAttributes.h"
46 #include "llvm/Support/ARMEHABI.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/MathExtras.h"
52 #include "llvm/Support/SMLoc.h"
53 #include "llvm/Support/TargetParser.h"
54 #include "llvm/Support/TargetRegistry.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include <algorithm>
57 #include <cassert>
58 #include <cstddef>
59 #include <cstdint>
60 #include <iterator>
61 #include <limits>
62 #include <memory>
63 #include <string>
64 #include <utility>
65 #include <vector>
66 
67 using namespace llvm;
68 
69 namespace {
70 
// Policy for conditional instructions that appear outside an IT block; see
// the -arm-implicit-it option below for the meaning of each value.
enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly };
72 
73 static cl::opt<ImplicitItModeTy> ImplicitItMode(
74     "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly),
75     cl::desc("Allow conditional instructions outdside of an IT block"),
76     cl::values(clEnumValN(ImplicitItModeTy::Always, "always",
77                           "Accept in both ISAs, emit implicit ITs in Thumb"),
78                clEnumValN(ImplicitItModeTy::Never, "never",
79                           "Warn in ARM, reject in Thumb"),
80                clEnumValN(ImplicitItModeTy::ARMOnly, "arm",
81                           "Accept in ARM, reject in Thumb"),
82                clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb",
83                           "Warn in ARM, emit implicit ITs in Thumb")));
84 
// When set, emit ELF build attributes for the selected target at parser
// construction time (see the constructor's call to emitTargetAttributes).
static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes",
                                        cl::init(false));
87 
88 cl::opt<bool>
89 DevDiags("arm-asm-parser-dev-diags", cl::init(false),
90          cl::desc("Use extended diagnostics, which include implementation "
91                   "details useful for development"));
92 
// How a NEON vector-list operand addresses lanes: no lane syntax, all lanes
// ("[]"), or a single indexed lane ("[n]").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
94 
95 class UnwindContext {
96   using Locs = SmallVector<SMLoc, 4>;
97 
98   MCAsmParser &Parser;
99   Locs FnStartLocs;
100   Locs CantUnwindLocs;
101   Locs PersonalityLocs;
102   Locs PersonalityIndexLocs;
103   Locs HandlerDataLocs;
104   int FPReg;
105 
106 public:
107   UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {}
108 
109   bool hasFnStart() const { return !FnStartLocs.empty(); }
110   bool cantUnwind() const { return !CantUnwindLocs.empty(); }
111   bool hasHandlerData() const { return !HandlerDataLocs.empty(); }
112 
113   bool hasPersonality() const {
114     return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty());
115   }
116 
117   void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); }
118   void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); }
119   void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); }
120   void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); }
121   void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); }
122 
123   void saveFPReg(int Reg) { FPReg = Reg; }
124   int getFPReg() const { return FPReg; }
125 
126   void emitFnStartLocNotes() const {
127     for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end();
128          FI != FE; ++FI)
129       Parser.Note(*FI, ".fnstart was specified here");
130   }
131 
132   void emitCantUnwindLocNotes() const {
133     for (Locs::const_iterator UI = CantUnwindLocs.begin(),
134                               UE = CantUnwindLocs.end(); UI != UE; ++UI)
135       Parser.Note(*UI, ".cantunwind was specified here");
136   }
137 
138   void emitHandlerDataLocNotes() const {
139     for (Locs::const_iterator HI = HandlerDataLocs.begin(),
140                               HE = HandlerDataLocs.end(); HI != HE; ++HI)
141       Parser.Note(*HI, ".handlerdata was specified here");
142   }
143 
144   void emitPersonalityLocNotes() const {
145     for (Locs::const_iterator PI = PersonalityLocs.begin(),
146                               PE = PersonalityLocs.end(),
147                               PII = PersonalityIndexLocs.begin(),
148                               PIE = PersonalityIndexLocs.end();
149          PI != PE || PII != PIE;) {
150       if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer()))
151         Parser.Note(*PI++, ".personality was specified here");
152       else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer()))
153         Parser.Note(*PII++, ".personalityindex was specified here");
154       else
155         llvm_unreachable(".personality and .personalityindex cannot be "
156                          "at the same location");
157     }
158   }
159 
160   void reset() {
161     FnStartLocs = Locs();
162     CantUnwindLocs = Locs();
163     PersonalityLocs = Locs();
164     HandlerDataLocs = Locs();
165     PersonalityIndexLocs = Locs();
166     FPReg = ARM::SP;
167   }
168 };
169 
/// Target assembly parser for the ARM and Thumb instruction sets. Beyond
/// operand and directive parsing, this class maintains the state of the
/// current IT block (ITState) so that conditional Thumb instructions can be
/// checked against an explicit IT instruction, or wrapped in an implicitly
/// generated one when -arm-implicit-it allows it.
class ARMAsmParser : public MCTargetAsmParser {
  const MCRegisterInfo *MRI;
  UnwindContext UC;

  ARMTargetStreamer &getTargetStreamer() {
    assert(getParser().getStreamer().getTargetStreamer() &&
           "do not have a target streamer");
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Map of register aliases defined via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // Whether the next symbol parsed should be treated as Thumb code.
  // NOTE(review): consumed via onLabelParsed(); presumably set by the
  // Thumb-related directives -- confirm against their implementations.
  bool NextSymbolIsThumb;

  // True if -arm-implicit-it permits emitting implicit IT blocks in Thumb.
  bool useImplicitITThumb() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ThumbOnly;
  }

  // True if -arm-implicit-it permits bare conditional instructions in ARM.
  bool useImplicitITARM() const {
    return ImplicitItMode == ImplicitItModeTy::Always ||
           ImplicitItMode == ImplicitItModeTy::ARMOnly;
  }

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)
                              // Note that this does not have the same encoding
                              // as in the IT instruction, which also depends
                              // on the low bit of the condition code.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,4], with 0 being the IT
                              // instruction itself. Initialized according to
                              // count of instructions in block.  ~0U if no
                              // active IT block.

    bool IsExplicit;          // true  - The IT instruction was present in the
                              //         input, we should not modify it.
                              // false - The IT instruction was added
                              //         implicitly, we can extend it if that
                              //         would be legal.
  } ITState;

  // Instructions held back while an implicit IT block is still open; they
  // are emitted, preceded by the synthesized IT instruction, in
  // flushPendingInstructions().
  SmallVector<MCInst, 4> PendingConditionalInsts;

  // Close out any open implicit IT block: emit the synthesized IT
  // instruction followed by the buffered conditional instructions, then
  // reset the IT state.
  void flushPendingInstructions(MCStreamer &Out) override {
    if (!inImplicitITBlock()) {
      assert(PendingConditionalInsts.size() == 0);
      return;
    }

    // Emit the IT instruction
    unsigned Mask = getITMaskEncoding();
    MCInst ITInst;
    ITInst.setOpcode(ARM::t2IT);
    ITInst.addOperand(MCOperand::createImm(ITState.Cond));
    ITInst.addOperand(MCOperand::createImm(Mask));
    Out.EmitInstruction(ITInst, getSTI());

    // Emit the conditional instructions
    assert(PendingConditionalInsts.size() <= 4);
    for (const MCInst &Inst : PendingConditionalInsts) {
      Out.EmitInstruction(Inst, getSTI());
    }
    PendingConditionalInsts.clear();

    // Clear the IT state
    ITState.Mask = 0;
    ITState.CurPosition = ~0U;
  }

  // IT-block state queries. CurPosition == ~0U means no block is active.
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; }
  bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; }

  // True when the instruction being parsed occupies the final slot of the
  // current IT block (block length is 4 - trailing zeros of the mask).
  bool lastInITBlock() {
    return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask);
  }

  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done, except for implicit IT blocks, which we leave
    // open until we find an instruction that can't be added to it.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  void rewindImplicitITPosition() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition > 1);
    ITState.CurPosition--;
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    NewMask |= ITState.Mask & (0xC << TZ);
    NewMask |= 0x2 << TZ;
    ITState.Mask = NewMask;
  }

  // Rewind the state of the current IT block, removing the last slot from it.
  // If we were at the first slot, this closes the IT block.
  void discardImplicitITBlock() {
    assert(inImplicitITBlock());
    assert(ITState.CurPosition == 1);
    ITState.CurPosition = ~0U;
  }

  // Return the low-subreg of a given Q register.
  unsigned getDRegFromQReg(unsigned QReg) const {
    return MRI->getSubReg(QReg, ARM::dsub_0);
  }

  // Get the encoding of the IT mask, as it will appear in an IT instruction.
  unsigned getITMaskEncoding() {
    assert(inITBlock());
    unsigned Mask = ITState.Mask;
    unsigned TZ = countTrailingZeros(Mask);
    // For an even (inverted-sense) condition code the encoded mask bits
    // above the trailing 1 are flipped relative to our internal t/e form.
    if ((ITState.Cond & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      Mask ^= (0xE << TZ) & 0xF;
    }
    return Mask;
  }

  // Get the condition code corresponding to the current IT block slot.
  ARMCC::CondCodes currentITCond() {
    unsigned MaskBit;
    if (ITState.CurPosition == 1)
      MaskBit = 1;
    else
      MaskBit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;

    return MaskBit ? ITState.Cond : ARMCC::getOppositeCondition(ITState.Cond);
  }

  // Invert the condition of the current IT block slot without changing any
  // other slots in the same block.
  void invertCurrentITCondition() {
    if (ITState.CurPosition == 1) {
      ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond);
    } else {
      ITState.Mask ^= 1 << (5 - ITState.CurPosition);
    }
  }

  // Returns true if the current IT block is full (all 4 slots used).
  bool isITBlockFull() {
    return inITBlock() && (ITState.Mask & 1);
  }

  // Extend the current implicit IT block to have one more slot with the given
  // condition code.
  void extendImplicitITBlock(ARMCC::CondCodes Cond) {
    assert(inImplicitITBlock());
    assert(!isITBlockFull());
    assert(Cond == ITState.Cond ||
           Cond == ARMCC::getOppositeCondition(ITState.Cond));
    unsigned TZ = countTrailingZeros(ITState.Mask);
    unsigned NewMask = 0;
    // Keep any existing condition bits.
    NewMask |= ITState.Mask & (0xE << TZ);
    // Insert the new condition bit.
    NewMask |= (Cond == ITState.Cond) << TZ;
    // Move the trailing 1 down one bit.
    NewMask |= 1 << (TZ - 1);
    ITState.Mask = NewMask;
  }

  // Create a new implicit IT block with a dummy condition code.
  void startImplicitITBlock() {
    assert(!inITBlock());
    ITState.Cond = ARMCC::AL;
    ITState.Mask = 8;
    ITState.CurPosition = 1;
    ITState.IsExplicit = false;
  }

  // Create a new explicit IT block with the given condition and mask. The mask
  // should be in the parsed format, with a 1 implying 't', regardless of the
  // low bit of the condition.
  void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) {
    assert(!inITBlock());
    ITState.Cond = Cond;
    ITState.Mask = Mask;
    ITState.CurPosition = 0;
    ITState.IsExplicit = true;
  }

  // Thin wrappers forwarding diagnostics to the underlying MCAsmParser.
  void Note(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Note(L, Msg, Range);
  }

  bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Warning(L, Msg, Range);
  }

  bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) {
    return getParser().Error(L, Msg, Range);
  }

  // Validity checks for LDM/STM-style register-list operands.
  bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo, bool IsARPop = false);
  bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands,
                           unsigned ListNo);

  // Operand and memory-form parsing helpers.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(OperandVector &);
  int tryParseShiftRegister(OperandVector &);
  bool parseRegisterList(OperandVector &);
  bool parseMemory(OperandVector &);
  bool parseOperand(OperandVector &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseLiteralValues(unsigned Size, SMLoc L);

  // Handlers for ARM-specific assembler directives.
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveFPU(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);
  bool parseDirectiveInst(SMLoc L, char Suffix = '\0');
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveEven(SMLoc L);
  bool parseDirectivePersonalityIndex(SMLoc L);
  bool parseDirectiveUnwindRaw(SMLoc L);
  bool parseDirectiveTLSDescSeq(SMLoc L);
  bool parseDirectiveMovSP(SMLoc L);
  bool parseDirectiveObjectArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveAlign(SMLoc L);
  bool parseDirectiveThumbSet(SMLoc L);

  // Split a full mnemonic into its base plus predication/carry/IT suffixes.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
                             bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting,
                                     OperandVector &Operands);

  // Subtarget feature queries for the currently selected STI.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return getSTI().getFeatureBits()[ARM::ModeThumb];
  }

  bool isThumbOne() const {
    return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool isThumbTwo() const {
    return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasThumb() const {
    return getSTI().getFeatureBits()[ARM::HasV4TOps];
  }

  bool hasThumb2() const {
    return getSTI().getFeatureBits()[ARM::FeatureThumb2];
  }

  bool hasV6Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6Ops];
  }

  bool hasV6T2Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV6T2Ops];
  }

  bool hasV6MOps() const {
    return getSTI().getFeatureBits()[ARM::HasV6MOps];
  }

  bool hasV7Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV7Ops];
  }

  bool hasV8Ops() const {
    return getSTI().getFeatureBits()[ARM::HasV8Ops];
  }

  bool hasV8MBaseline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps];
  }

  bool hasV8MMainline() const {
    return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps];
  }

  bool has8MSecExt() const {
    return getSTI().getFeatureBits()[ARM::Feature8MSecExt];
  }

  bool hasARM() const {
    return !getSTI().getFeatureBits()[ARM::FeatureNoARM];
  }

  bool hasDSP() const {
    return getSTI().getFeatureBits()[ARM::FeatureDSP];
  }

  bool hasD16() const {
    return getSTI().getFeatureBits()[ARM::FeatureD16];
  }

  bool hasV8_1aOps() const {
    return getSTI().getFeatureBits()[ARM::HasV8_1aOps];
  }

  bool hasRAS() const {
    return getSTI().getFeatureBits()[ARM::FeatureRAS];
  }

  // Toggle between ARM and Thumb mode and recompute available features.
  void SwitchMode() {
    MCSubtargetInfo &STI = copySTI();
    uint64_t FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }

  void FixModeAfterArchChange(bool WasThumb, SMLoc Loc);

  bool isMClass() const {
    return getSTI().getFeatureBits()[ARM::FeatureMClass];
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom parsers for ARM-specific operand forms.
  OperandMatchResultTy parseITCondCode(OperandVector &);
  OperandMatchResultTy parseCoprocNumOperand(OperandVector &);
  OperandMatchResultTy parseCoprocRegOperand(OperandVector &);
  OperandMatchResultTy parseCoprocOptionOperand(OperandVector &);
  OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &);
  OperandMatchResultTy parseProcIFlagsOperand(OperandVector &);
  OperandMatchResultTy parseMSRMaskOperand(OperandVector &);
  OperandMatchResultTy parseBankedRegOperand(OperandVector &);
  OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low,
                                   int High);
  OperandMatchResultTy parsePKHLSLImm(OperandVector &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(OperandVector &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(OperandVector &);
  OperandMatchResultTy parseShifterImm(OperandVector &);
  OperandMatchResultTy parseRotImm(OperandVector &);
  OperandMatchResultTy parseModImm(OperandVector &);
  OperandMatchResultTy parseBitfield(OperandVector &);
  OperandMatchResultTy parsePostIdxReg(OperandVector &);
  OperandMatchResultTy parseAM3Offset(OperandVector &);
  OperandMatchResultTy parseFPImm(OperandVector &);
  OperandMatchResultTy parseVectorList(OperandVector &);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst, const OperandVector &);
  void cvtThumbBranches(MCInst &Inst, const OperandVector &);

  bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
  bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out);
  bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands);
  bool isITBlockTerminator(MCInst &Inst) const;
  void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands);

public:
  // Target-specific match results, appended after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
    Match_RequiresV8,
    Match_RequiresFlagSetting,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
               const MCInstrInfo &MII, const MCTargetOptions &Options)
    : MCTargetAsmParser(Options, STI, MII), UC(Parser) {
    MCAsmParserExtension::Initialize(Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Add build attributes based on the selected target.
    if (AddBuildAttributes)
      getTargetStreamer().emitTargetAttributes(STI);

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;

    NextSymbolIsThumb = false;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  bool ParseDirective(AsmToken DirectiveID) override;

  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst,
                            SmallVectorImpl<NearMissInfo> &NearMisses,
                            bool MatchingInlineAsm, bool &EmitInITBlock,
                            MCStreamer &Out);

  // A rendered near-miss diagnostic: where to report it and the message text.
  struct NearMissMessage {
    SMLoc Loc;
    SmallString<128> Message;
  };

  const char *getCustomOperandDiag(ARMMatchResultTy MatchError);

  void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                        SmallVectorImpl<NearMissMessage> &NearMissesOut,
                        SMLoc IDLoc, OperandVector &Operands);
  void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc,
                        OperandVector &Operands);

  void onLabelParsed(MCSymbol *Symbol) override;
};
636 
637 /// ARMOperand - Instances of this class represent a parsed ARM machine
638 /// operand.
639 class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the payload union below: identifies which flavor of
  // operand this ARMOperand currently holds.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_InstSyncBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_BankedReg,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_ModifiedImmediate,
    k_ConstantPoolImmediate,
    k_BitfieldDescriptor,
    k_Token,
  } Kind;

  // Source range of the operand; AlignmentLoc marks the alignment token of a
  // memory operand (only meaningful when Kind == k_Memory).
  SMLoc StartLoc, EndLoc, AlignmentLoc;
  // Storage for the k_*RegisterList kinds (kept outside the union because it
  // has a non-trivial type).
  SmallVector<unsigned, 8> Registers;

  struct CCOp {
    ARMCC::CondCodes Val;
  };

  struct CopOp {
    unsigned Val;
  };

  struct CoprocOptionOp {
    unsigned Val;
  };

  struct ITMaskOp {
    unsigned Mask:4;
  };

  struct MBOptOp {
    ARM_MB::MemBOpt Val;
  };

  struct ISBOptOp {
    ARM_ISB::InstSyncBOpt Val;
  };

  struct IFlagsOp {
    ARM_PROC::IFlags Val;
  };

  struct MMaskOp {
    unsigned Val;
  };

  struct BankedRegOp {
    unsigned Val;
  };

  // Non-owning view of a token's characters (see getToken()).
  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct RegOp {
    unsigned RegNum;
  };

  // A vector register list is a sequential list of 1 to 4 registers.
  struct VectorListOp {
    unsigned RegNum;
    unsigned Count;
    unsigned LaneIndex;
    bool isDoubleSpaced;
  };

  struct VectorIndexOp {
    unsigned Val;
  };

  struct ImmOp {
    const MCExpr *Val;
  };

  /// Combined record for all forms of ARM address expressions.
  struct MemoryOp {
    unsigned BaseRegNum;
    // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
    // was specified.
    const MCConstantExpr *OffsetImm;  // Offset immediate value
    unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
    ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
    unsigned ShiftImm;        // shift for OffsetReg.
    unsigned Alignment;       // 0 = no alignment specified
    // n = alignment in bytes (2, 4, 8, 16, or 32)
    unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
  };

  struct PostIdxRegOp {
    unsigned RegNum;
    bool isAdd;
    ARM_AM::ShiftOpc ShiftTy;
    unsigned ShiftImm;
  };

  struct ShifterImmOp {
    bool isASR;
    unsigned Imm;
  };

  struct RegShiftedRegOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftReg;
    unsigned ShiftImm;
  };

  struct RegShiftedImmOp {
    ARM_AM::ShiftOpc ShiftTy;
    unsigned SrcReg;
    unsigned ShiftImm;
  };

  struct RotImmOp {
    unsigned Imm;
  };

  struct ModImmOp {
    unsigned Bits;
    unsigned Rot;
  };

  struct BitfieldOp {
    unsigned LSB;
    unsigned Width;
  };

  // Payload storage; the active member is selected by Kind.
  union {
    struct CCOp CC;
    struct CopOp Cop;
    struct CoprocOptionOp CoprocOption;
    struct MBOptOp MBOpt;
    struct ISBOptOp ISBOpt;
    struct ITMaskOp ITMask;
    struct IFlagsOp IFlags;
    struct MMaskOp MMask;
    struct BankedRegOp BankedReg;
    struct TokOp Tok;
    struct RegOp Reg;
    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ImmOp Imm;
    struct MemoryOp Memory;
    struct PostIdxRegOp PostIdxReg;
    struct ShifterImmOp ShifterImm;
    struct RegShiftedRegOp RegShiftedReg;
    struct RegShiftedImmOp RegShiftedImm;
    struct RotImmOp RotImm;
    struct ModImmOp ModImm;
    struct BitfieldOp Bitfield;
  };
814 
815 public:
  // Construct an operand of kind K; callers fill in the payload afterwards.
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const override { return StartLoc; }

  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const override { return EndLoc; }

  /// getLocRange - Get the range between the first and last token of this
  /// operand.
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  /// getAlignmentLoc - Get the location of the Alignment token of this operand.
  SMLoc getAlignmentLoc() const {
    assert(Kind == k_Memory && "Invalid access!");
    return AlignmentLoc;
  }
833 
  // Typed accessors for the payload union; each asserts the active Kind.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const override {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  const MCExpr *getConstantPoolImm() const {
    assert(isConstantPoolImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  unsigned getBankedReg() const {
    assert(Kind == k_BankedReg && "Invalid access!");
    return BankedReg.Val;
  }
899 
  // Simple kind queries. Note that an IT condition code is stored as a
  // k_CondCode operand, so isITCondCode() intentionally mirrors isCondCode().
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const override {
    return Kind == k_Immediate;
  }
910 
  // An ARM-mode branch target: either a non-constant expression (to be
  // resolved later via a fixup), or a constant that is word (4-byte) aligned.
  bool isARMBranchTarget() const {
    if (!isImm()) return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
      return CE->getValue() % 4 == 0;
    return true;
  }


  // A Thumb-mode branch target: as above, but a constant only needs to be
  // halfword (2-byte) aligned.
  bool isThumbBranchTarget() const {
    if (!isImm()) return false;

    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
      return CE->getValue() % 2 == 0;
    return true;
  }
927 
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are accepted; they resolve later via a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      // Largest encodable value: an all-ones field, scaled up.
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }
942 
  // checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic references are accepted; they resolve later via a fixup.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      // Two's-complement bounds of a signed field of the given width,
      // scaled up.
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }
958 
959   // checks whether this operand is a memory operand computed as an offset
960   // applied to PC. the offset may have 8 bits of magnitude and is represented
961   // with two bits of shift. textually it may be either [pc, #imm], #imm or
962   // relocable expression...
963   bool isThumbMemPC() const {
964     int64_t Val = 0;
965     if (isImm()) {
966       if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
967       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
968       if (!CE) return false;
969       Val = CE->getValue();
970     }
971     else if (isMem()) {
972       if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
973       if(Memory.BaseRegNum != ARM::PC) return false;
974       Val = Memory.OffsetImm->getValue();
975     }
976     else return false;
977     return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
978   }
979 
980   bool isFPImm() const {
981     if (!isImm()) return false;
982     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
983     if (!CE) return false;
984     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
985     return Val != -1;
986   }
987 
  // Checks whether this operand is a constant immediate in the *inclusive*
  // range [N, M]. Non-constant expressions never match.
  template<int64_t N, int64_t M>
  bool isImmediate() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= N && Value <= M;
  }

  // Same as isImmediate<N, M>, but additionally requires the value to be a
  // multiple of 4.
  template<int64_t N, int64_t M>
  bool isImmediateS4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= N && Value <= M;
  }
1005 
1006   bool isFBits16() const {
1007     return isImmediate<0, 17>();
1008   }
1009   bool isFBits32() const {
1010     return isImmediate<1, 33>();
1011   }
  // 8-bit immediate scaled by 4: multiples of 4 in [-1020, 1020].
  bool isImm8s4() const {
    return isImmediateS4<-1020, 1020>();
  }
  // Multiples of 4 in [0, 1020].
  bool isImm0_1020s4() const {
    return isImmediateS4<0, 1020>();
  }
  // Multiples of 4 in [0, 508].
  bool isImm0_508s4() const {
    return isImmediateS4<0, 508>();
  }
  // A constant whose *negation* is a multiple of 4 in (0, 508].
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }

  // A constant whose negation lies in (0, 4095]; zero is excluded so the
  // positive-range form is used for it instead.
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    return Value > 0 && Value < 4096;
  }

  bool isImm0_7() const {
    return isImmediate<0, 7>();
  }

  bool isImm1_16() const {
    return isImmediate<1, 16>();
  }

  bool isImm1_32() const {
    return isImmediate<1, 32>();
  }

  bool isImm8_255() const {
    return isImmediate<8, 255>();
  }

  // Constant in [256, 65535]; non-constant expressions are accepted and
  // resolved later via a fixup.
  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }

  // Constant in [0, 65535]; non-constant expressions are accepted and
  // resolved later via a fixup.
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
1073 
1074   bool isImm24bit() const {
1075     return isImmediate<0, 0xffffff + 1>();
1076   }
1077 
1078   bool isImmThumbSR() const {
1079     return isImmediate<1, 33>();
1080   }
1081 
1082   bool isPKHLSLImm() const {
1083     return isImmediate<0, 32>();
1084   }
1085 
1086   bool isPKHASRImm() const {
1087     return isImmediate<0, 33>();
1088   }
1089 
1090   bool isAdrLabel() const {
1091     // If we have an immediate that's not a constant, treat it as a label
1092     // reference needing a fixup.
1093     if (isImm() && !isa<MCConstantExpr>(getImm()))
1094       return true;
1095 
1096     // If it is a constant, it must fit into a modified immediate encoding.
1097     if (!isImm()) return false;
1098     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1099     if (!CE) return false;
1100     int64_t Value = CE->getValue();
1101     return (ARM_AM::getSOImmVal(Value) != -1 ||
1102             ARM_AM::getSOImmVal(-Value) != -1);
1103   }
1104 
  // A Thumb-2 modified immediate (t2_so_imm), or a non-constant expression
  // that will be resolved by a fixup. :upper16:/:lower16: expressions are
  // deliberately excluded so they match isImm0_65535Expr() instead.
  bool isT2SOImm() const {
    // If we have an immediate that's not a constant, treat it as an expression
    // needing a fixup.
    if (isImm() && !isa<MCConstantExpr>(getImm())) {
      // We want to avoid matching :upper16: and :lower16: as we want these
      // expressions to match in isImm0_65535Expr()
      const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
      return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                             ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
    }
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }

  // A constant that is not itself a t2_so_imm, but whose bitwise complement
  // is one.
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }

  // A constant that is not itself a t2_so_imm, but whose negation is one.
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }
1140 
1141   bool isSetEndImm() const {
1142     if (!isImm()) return false;
1143     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1144     if (!CE) return false;
1145     int64_t Value = CE->getValue();
1146     return Value == 1 || Value == 0;
1147   }
1148 
  // More simple kind queries.
  bool isReg() const override { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const override { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isMem() const override { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isModImm() const { return Kind == k_ModifiedImmediate; }

  // A constant that is not itself an ARM modified immediate, but whose
  // bitwise complement is one.
  bool isModImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }

  // A constant that is not itself an ARM modified immediate, but whose
  // negation is one.
  bool isModImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }

  // A constant whose negation lies in [1, 7].
  bool isThumbModImmNeg1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 0 < Value && Value < 8;
  }

  // A constant whose negation lies in [8, 255].
  bool isThumbModImmNeg8_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int32_t Value = -(int32_t)CE->getValue();
    return 7 < Value && Value < 256;
  }

  bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  // A post-index register with no shift applied.
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  // A memory operand with neither register nor immediate offset. If alignOK,
  // any alignment qualifier is accepted; otherwise the alignment must equal
  // the given value (0 meaning "no alignment qualifier").
  bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
    if (!isMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
     (alignOK || Memory.Alignment == Alignment);
  }
  // A PC-relative memory operand with a 12-bit immediate offset.
  bool isMemPCRelImm12() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes the special #-0 offset and is also accepted.
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }
1221 
1222   bool isAlignedMemory() const {
1223     return isMemNoOffset(true);
1224   }
1225 
1226   bool isAlignedMemoryNone() const {
1227     return isMemNoOffset(false, 0);
1228   }
1229 
1230   bool isDupAlignedMemoryNone() const {
1231     return isMemNoOffset(false, 0);
1232   }
1233 
1234   bool isAlignedMemory16() const {
1235     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1236       return true;
1237     return isMemNoOffset(false, 0);
1238   }
1239 
1240   bool isDupAlignedMemory16() const {
1241     if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
1242       return true;
1243     return isMemNoOffset(false, 0);
1244   }
1245 
1246   bool isAlignedMemory32() const {
1247     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1248       return true;
1249     return isMemNoOffset(false, 0);
1250   }
1251 
1252   bool isDupAlignedMemory32() const {
1253     if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
1254       return true;
1255     return isMemNoOffset(false, 0);
1256   }
1257 
1258   bool isAlignedMemory64() const {
1259     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1260       return true;
1261     return isMemNoOffset(false, 0);
1262   }
1263 
1264   bool isDupAlignedMemory64() const {
1265     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1266       return true;
1267     return isMemNoOffset(false, 0);
1268   }
1269 
1270   bool isAlignedMemory64or128() const {
1271     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1272       return true;
1273     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1274       return true;
1275     return isMemNoOffset(false, 0);
1276   }
1277 
1278   bool isDupAlignedMemory64or128() const {
1279     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1280       return true;
1281     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1282       return true;
1283     return isMemNoOffset(false, 0);
1284   }
1285 
1286   bool isAlignedMemory64or128or256() const {
1287     if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
1288       return true;
1289     if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
1290       return true;
1291     if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
1292       return true;
1293     return isMemNoOffset(false, 0);
1294   }
1295 
  // Addressing mode 2: register offset, or immediate offset in
  // [-4095, 4095], with no alignment qualifier.
  bool isAddrMode2() const {
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }

  // Stand-alone addressing mode 2 offset immediate (for post-indexed forms).
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -4096 && Val < 4096);
  }

  // Addressing mode 3: unshifted register offset, or immediate offset in
  // [-255, 255].
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
    // have to check for this too.
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // Stand-alone addressing mode 3 offset: an unshifted post-index register
  // or an immediate in [-255, 255].
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val > -256 && Val < 256) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // Addressing mode 5: immediate offset only, a multiple of 4 in
  // [-1020, 1020].
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == std::numeric_limits<int32_t>::min();
  }

  // Addressing mode 5 for fp16: immediate offset only, a multiple of 2 in
  // [-510, 510].
  bool isAddrMode5FP16() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-510, 510] and a multiple of 2.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
           Val == std::numeric_limits<int32_t>::min();
  }
1381 
1382   bool isMemTBB() const {
1383     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1384         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1385       return false;
1386     return true;
1387   }
1388 
1389   bool isMemTBH() const {
1390     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1391         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
1392         Memory.Alignment != 0 )
1393       return false;
1394     return true;
1395   }
1396 
1397   bool isMemRegOffset() const {
1398     if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
1399       return false;
1400     return true;
1401   }
1402 
1403   bool isT2MemRegOffset() const {
1404     if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1405         Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
1406       return false;
1407     // Only lsl #{0, 1, 2, 3} allowed.
1408     if (Memory.ShiftType == ARM_AM::no_shift)
1409       return true;
1410     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
1411       return false;
1412     return true;
1413   }
1414 
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    // Both the base and (if present) the offset must be low registers.
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }

  // Thumb low-register base with a scaled immediate offset (scale 4).
  bool isMemThumbRIs4() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }

  // Thumb low-register base with a scaled immediate offset (scale 2).
  bool isMemThumbRIs2() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }

  // Thumb low-register base with an unscaled immediate offset.
  bool isMemThumbRIs1() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }

  // Thumb SP-relative memory with a word-scaled immediate offset.
  bool isMemThumbSPI() const {
    if (!isMem() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
1464 
  // Memory with an immediate offset that is a multiple of 4 in
  // [-1020, 1020].
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // Special case, #-0 is std::numeric_limits<int32_t>::min().
    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
           Val == std::numeric_limits<int32_t>::min();
  }

  // Memory with an immediate offset that is a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }

  // Memory with an immediate offset in [-255, 255]; non-PC base only.
  bool isMemImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -256 && Val < 256);
  }

  // Memory with a non-negative immediate offset in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }

  // Memory with a strictly negative immediate offset in [-255, -1];
  // an explicit offset is required here, unlike the predicates above.
  bool isMemNegImm8Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val == std::numeric_limits<int32_t>::min()) ||
           (Val > -256 && Val < 0);
  }

  // Memory with an unsigned 12-bit immediate offset.
  bool isMemUImm12Offset() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }

  // Memory with a signed 12-bit immediate offset.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.

    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val > -4096 && Val < 4096) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  bool isConstPoolAsmImm() const {
    // Delay processing of Constant Pool Immediate, this will turn into
    // a constant. Match no other operand
    return (isConstantPoolImm());
  }
1554 
  // Post-indexed 8-bit immediate: (-256, 256), with INT32_MIN encoding the
  // special #-0 offset.
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  // Post-indexed 8-bit immediate scaled by 4: multiples of 4 in
  // [-1020, 1020], with INT32_MIN encoding the special #-0 offset.
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
           (Val == std::numeric_limits<int32_t>::min());
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isBankedReg() const { return Kind == k_BankedReg; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1576 
  // NEON operands.
  // Vector lists are either single spaced (consecutive D registers) or
  // double spaced (every other D register).
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }

  // Plain (non-lane) vector lists, distinguished by register count and
  // spacing; the DPair variants additionally check register-class
  // membership.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPair() const {
    if (!isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListDPairSpaced() const {
    if (Kind != k_VectorList) return false;
    if (isSingleSpacedVectorList()) return false;
    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListThreeQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // "All lanes" vector lists (the vld/vst dup forms, e.g. {d0[]}).
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListDPairAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
  }

  bool isVecListDPairSpacedAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListThreeQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListFourQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 4;
  }

  // Lane-indexed vector lists. The allowed LaneIndex range depends on the
  // element size: byte lanes 0-7, halfword lanes 0-3, word lanes 0-1.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }

  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }

  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
  }

  bool isVecListThreeDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListThreeQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
  }

  bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }
1765 
1766   bool isVectorIndex8() const {
1767     if (Kind != k_VectorIndex) return false;
1768     return VectorIndex.Val < 8;
1769   }
1770 
1771   bool isVectorIndex16() const {
1772     if (Kind != k_VectorIndex) return false;
1773     return VectorIndex.Val < 4;
1774   }
1775 
1776   bool isVectorIndex32() const {
1777     if (Kind != k_VectorIndex) return false;
1778     return VectorIndex.Val < 2;
1779   }
1780   bool isVectorIndex64() const {
1781     if (Kind != k_VectorIndex) return false;
1782     return VectorIndex.Val < 1;
1783   }
1784 
1785   bool isNEONi8splat() const {
1786     if (!isImm()) return false;
1787     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1788     // Must be a constant.
1789     if (!CE) return false;
1790     int64_t Value = CE->getValue();
1791     // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1792     // value.
1793     return Value >= 0 && Value < 256;
1794   }
1795 
  // True if the immediate is encodable as a 16-bit NEON splat (see
  // ARM_AM::isNEONi16splat for the legal bit patterns).
  bool isNEONi16splat() const {
    if (isNEONByteReplicate(2))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi16splat(Value);
  }
1807 
1808   bool isNEONi16splatNot() const {
1809     if (!isImm())
1810       return false;
1811     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1812     // Must be a constant.
1813     if (!CE) return false;
1814     unsigned Value = CE->getValue();
1815     return ARM_AM::isNEONi16splat(~Value & 0xffff);
1816   }
1817 
  // True if the immediate is encodable as a 32-bit NEON splat (see
  // ARM_AM::isNEONi32splat for the legal bit patterns).
  bool isNEONi32splat() const {
    if (isNEONByteReplicate(4))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi32splat(Value);
  }
1829 
1830   bool isNEONi32splatNot() const {
1831     if (!isImm())
1832       return false;
1833     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1834     // Must be a constant.
1835     if (!CE) return false;
1836     unsigned Value = CE->getValue();
1837     return ARM_AM::isNEONi32splat(~Value);
1838   }
1839 
  // True if the immediate consists of one byte value repeated in each of
  // its NumBytes byte positions (e.g. 0xABAB for NumBytes == 2). Zero is
  // deliberately excluded so other immediate classes can claim it.
  bool isNEONByteReplicate(unsigned NumBytes) const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.

    unsigned char B = Value & 0xff;
    // Every subsequent byte must match the lowest byte.
    for (unsigned i = 1; i < NumBytes; ++i) {
      Value >>= 8;
      if ((Value & 0xff) != B)
        return false;
    }
    return true;
  }
1859 
  // 16-bit immediate made of a single replicated byte.
  bool isNEONi16ByteReplicate() const { return isNEONByteReplicate(2); }
  // 32-bit immediate made of a single replicated byte.
  bool isNEONi32ByteReplicate() const { return isNEONByteReplicate(4); }
1862 
  // True if the immediate is encodable for a 32-bit NEON VMOV: all set
  // bits confined to a single byte lane, or the 0x__ff / 0x__ffff "ones
  // fill" forms. Byte-replicate values are deferred to that class.
  bool isNEONi32vmov() const {
    if (isNEONByteReplicate(4))
      return false; // Let it to be classified as byte-replicate case.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    // FIXME: This is probably wrong and a copy and paste from previous example
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1883 
  // Same legality test as isNEONi32vmov, applied to the bitwise complement
  // of the immediate (the VMVN form).
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    // FIXME: This is probably wrong and a copy and paste from previous example
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1900 
1901   bool isNEONi64splat() const {
1902     if (!isImm()) return false;
1903     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1904     // Must be a constant.
1905     if (!CE) return false;
1906     uint64_t Value = CE->getValue();
1907     // i64 value with each byte being either 0 or 0xff.
1908     for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1909       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1910     return true;
1911   }
1912 
1913   template<int64_t Angle, int64_t Remainder>
1914   bool isComplexRotation() const {
1915     if (!isImm()) return false;
1916 
1917     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1918     if (!CE) return false;
1919     uint64_t Value = CE->getValue();
1920 
1921     return (Value % Angle == Remainder && Value <= 270);
1922   }
1923 
1924   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1925     // Add as immediates when possible.  Null MCExpr = 0.
1926     if (!Expr)
1927       Inst.addOperand(MCOperand::createImm(0));
1928     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1929       Inst.addOperand(MCOperand::createImm(CE->getValue()));
1930     else
1931       Inst.addOperand(MCOperand::createExpr(Expr));
1932   }
1933 
  // Branch target: emitted as an immediate if constant, else as an
  // expression that will get a fixup.
  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1938 
  // Thumb branch target: same emission strategy as the ARM form.
  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1943 
1944   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1945     assert(N == 2 && "Invalid number of operands!");
1946     Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
1947     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1948     Inst.addOperand(MCOperand::createReg(RegNum));
1949   }
1950 
  // Coprocessor number (p0-p15) as an immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }
1955 
  // Coprocessor register (c0-c15) as an immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }
1960 
  // Coprocessor option ({imm} on LDC/STC) as an immediate.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }
1965 
  // IT-block mask bits as an immediate.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }
1970 
  // IT-instruction condition code as an immediate (no CPSR operand here,
  // unlike addCondCodeOperands).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }
1975 
  // Optional flag-setting output register (CPSR or no register).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1980 
  // Plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }
1985 
  // Register shifted by register (e.g. "r0, lsl r1"): source reg, shift
  // reg, then the packed shift opcode/amount immediate.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non-RegShiftedReg!");
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::createImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }
1995 
  // Register shifted by immediate (e.g. "r0, lsl #4"): source reg, then
  // the packed shift opcode/amount immediate.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non-RegShiftedImm!");
    Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
    // Shift of #32 is encoded as 0 where permitted
    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
    Inst.addOperand(MCOperand::createImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
  }
2006 
  // Shifter immediate: bit 5 selects ASR vs LSL, low bits hold the amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
2012 
2013   void addRegListOperands(MCInst &Inst, unsigned N) const {
2014     assert(N == 1 && "Invalid number of operands!");
2015     const SmallVectorImpl<unsigned> &RegList = getRegList();
2016     for (SmallVectorImpl<unsigned>::const_iterator
2017            I = RegList.begin(), E = RegList.end(); I != E; ++I)
2018       Inst.addOperand(MCOperand::createReg(*I));
2019   }
2020 
  // D-register list: same emission as a core register list.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2024 
  // S-register list: same emission as a core register list.
  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
2028 
  // Rotate immediate for SXTB/UXTB-style instructions.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }
2034 
  // ARM modified-immediate: 8-bit value with a 4-bit rotation packed into
  // one immediate operand.
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup)
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }
2044 
2045   void addModImmNotOperands(MCInst &Inst, unsigned N) const {
2046     assert(N == 1 && "Invalid number of operands!");
2047     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2048     uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
2049     Inst.addOperand(MCOperand::createImm(Enc));
2050   }
2051 
2052   void addModImmNegOperands(MCInst &Inst, unsigned N) const {
2053     assert(N == 1 && "Invalid number of operands!");
2054     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2055     uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
2056     Inst.addOperand(MCOperand::createImm(Enc));
2057   }
2058 
  // Thumb negated immediate in [8,255]: emit the negation of the value.
  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2065 
  // Thumb negated immediate in [1,7]: emit the negation of the value.
  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2072 
  // BFC/BFI bitfield operand: convert (lsb, width) into the inverted mask
  // the instruction encodes.
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }
2083 
  // Generic immediate: constant folds to an imm operand, otherwise the
  // expression is emitted for a later fixup.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
2088 
  // Fixed-point fraction bits for VCVT (16-bit): encoded as 16 - fbits.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }
2094 
  // Fixed-point fraction bits for VCVT (32-bit): encoded as 32 - fbits.
  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }
2100 
  // VFP immediate: re-encode the f32 bit pattern as the 8-bit VFP
  // immediate form.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2107 
  // 8-bit immediate scaled by 4 (LDRD/STRD offsets); stored unscaled.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }
2115 
  // Immediate in [0,1020], multiple of 4: stored divided by 4.
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2123 
  // Negated immediate in [0,508], multiple of 4: stored as -(value/4).
  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }
2131 
  // Immediate in [0,508], multiple of 4: stored divided by 4.
  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }
2139 
  // Immediate in [1,16]: stored as value - 1.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2147 
  // Immediate in [1,32]: stored as value - 1.
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }
2155 
  // Thumb shift-right amount: 32 encodes as 0, all other values directly.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
  }
2164 
  // PKH ASR shift amount: 32 encodes as 0, all other values directly.
  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
  }
2173 
  // Thumb2 so_imm of the complemented source value (e.g. MVN for MOV).
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }
2181 
  // Thumb2 so_imm of the negated source value (e.g. SUB for ADD).
  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }
2189 
  // imm0_4095 of the negated source value.
  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-CE->getValue()));
  }
2197 
2198   void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
2199     if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
2200       Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
2201       return;
2202     }
2203 
2204     const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
2205     assert(SR && "Unknown value type!");
2206     Inst.addOperand(MCOperand::createExpr(SR));
2207   }
2208 
  // Thumb PC-relative load operand: a constant immediate, a symbol
  // reference needing a fixup, or a memory operand's constant offset.
  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (isImm()) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
      if (CE) {
        Inst.addOperand(MCOperand::createImm(CE->getValue()));
        return;
      }

      const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);

      assert(SR && "Unknown value type!");
      Inst.addOperand(MCOperand::createExpr(SR));
      return;
    }

    // Otherwise this must be a memory operand with a constant offset.
    assert(isMem()  && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
  }
2229 
  // DMB/DSB barrier option as an immediate.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }
2234 
  // ISB barrier option as an immediate.
  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }
2239 
  // Memory operand with no offset: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }
2244 
  // PC-relative 12-bit immediate offset (base register is implicitly PC,
  // so only the offset is emitted).
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    int32_t Imm = Memory.OffsetImm->getValue();
    Inst.addOperand(MCOperand::createImm(Imm));
  }
2250 
  // ADR label operand: constant offsets are emitted directly; anything
  // else becomes an expression resolved by a fixup.
  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(isImm() && "Not an immediate!");

    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (!isa<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }
2266 
  // NEON load/store address with alignment: base register plus alignment
  // (in bytes) as an immediate.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.Alignment));
  }
2272 
  // Alignment class is checked by the matcher; emission is shared.
  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2276 
  // Alignment class is checked by the matcher; emission is shared.
  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2280 
  // Alignment class is checked by the matcher; emission is shared.
  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2284 
  // Alignment class is checked by the matcher; emission is shared.
  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2288 
  // Alignment class is checked by the matcher; emission is shared.
  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2292 
  // Alignment class is checked by the matcher; emission is shared.
  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2296 
  // Alignment class is checked by the matcher; emission is shared.
  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2300 
  // Alignment class is checked by the matcher; emission is shared.
  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2304 
  // Alignment class is checked by the matcher; emission is shared.
  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2308 
  // Alignment class is checked by the matcher; emission is shared.
  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2312 
  // Alignment class is checked by the matcher; emission is shared.
  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
2316 
  // Addressing mode 2 (LDR/STR word/ubyte): base reg, offset reg, and the
  // packed AM2 opcode immediate (add/sub flag, offset, shift).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2336 
  // Post-indexed AM2 immediate offset: a zero register plus the packed
  // AM2 opcode (sign, magnitude).
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2350 
  // Addressing mode 3 (LDRH/LDRSB/etc.): base reg, offset reg, and the
  // packed AM3 opcode immediate; a bare immediate is a label reference.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createReg(0));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2379 
  // Post-indexed AM3 offset: either a register (with add/sub direction)
  // or a constant immediate packed into the AM3 opcode.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2401 
  // Addressing mode 5 (VLDR/VSTR): base reg plus the packed AM5 opcode
  // (sign, word-scaled offset); a bare immediate is a label reference.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2423 
  // FP16 variant of AM5: offset is halfword-scaled instead of word-scaled.
  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // The lower bit is always zero and as such is not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2445 
  // Base register plus 8-bit, 4-byte-scaled offset (stored unscaled); a
  // bare immediate is a label reference.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2461 
  // Base register plus offset in [0,1020], multiple of 4, stored scaled.
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }
2469 
2470   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
2471     assert(N == 2 && "Invalid number of operands!");
2472     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2473     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2474     Inst.addOperand(MCOperand::createImm(Val));
2475   }
2476 
  // Positive-imm8 offsets share the generic imm8 encoding path; the range
  // restriction is enforced by the operand predicate, not here.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
2480 
  // Negative-imm8 offsets share the generic imm8 encoding path; the range
  // restriction is enforced by the operand predicate, not here.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
2484 
2485   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2486     assert(N == 2 && "Invalid number of operands!");
2487     // If this is an immediate, it's a label reference.
2488     if (isImm()) {
2489       addExpr(Inst, getImm());
2490       Inst.addOperand(MCOperand::createImm(0));
2491       return;
2492     }
2493 
2494     // Otherwise, it's a normal memory reg+offset.
2495     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2496     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2497     Inst.addOperand(MCOperand::createImm(Val));
2498   }
2499 
2500   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2501     assert(N == 2 && "Invalid number of operands!");
2502     // If this is an immediate, it's a label reference.
2503     if (isImm()) {
2504       addExpr(Inst, getImm());
2505       Inst.addOperand(MCOperand::createImm(0));
2506       return;
2507     }
2508 
2509     // Otherwise, it's a normal memory reg+offset.
2510     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
2511     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2512     Inst.addOperand(MCOperand::createImm(Val));
2513   }
2514 
2515   void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
2516     assert(N == 1 && "Invalid number of operands!");
2517     // This is container for the immediate that we will create the constant
2518     // pool from
2519     addExpr(Inst, getConstantPoolImm());
2520     return;
2521   }
2522 
2523   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
2524     assert(N == 2 && "Invalid number of operands!");
2525     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2526     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2527   }
2528 
2529   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
2530     assert(N == 2 && "Invalid number of operands!");
2531     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2532     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2533   }
2534 
2535   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2536     assert(N == 3 && "Invalid number of operands!");
2537     unsigned Val =
2538       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
2539                         Memory.ShiftImm, Memory.ShiftType);
2540     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2541     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2542     Inst.addOperand(MCOperand::createImm(Val));
2543   }
2544 
2545   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
2546     assert(N == 3 && "Invalid number of operands!");
2547     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2548     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2549     Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
2550   }
2551 
2552   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
2553     assert(N == 2 && "Invalid number of operands!");
2554     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2555     Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
2556   }
2557 
2558   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
2559     assert(N == 2 && "Invalid number of operands!");
2560     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2561     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2562     Inst.addOperand(MCOperand::createImm(Val));
2563   }
2564 
2565   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
2566     assert(N == 2 && "Invalid number of operands!");
2567     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
2568     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2569     Inst.addOperand(MCOperand::createImm(Val));
2570   }
2571 
2572   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
2573     assert(N == 2 && "Invalid number of operands!");
2574     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
2575     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2576     Inst.addOperand(MCOperand::createImm(Val));
2577   }
2578 
2579   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
2580     assert(N == 2 && "Invalid number of operands!");
2581     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
2582     Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
2583     Inst.addOperand(MCOperand::createImm(Val));
2584   }
2585 
2586   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
2587     assert(N == 1 && "Invalid number of operands!");
2588     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2589     assert(CE && "non-constant post-idx-imm8 operand!");
2590     int Imm = CE->getValue();
2591     bool isAdd = Imm >= 0;
2592     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2593     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
2594     Inst.addOperand(MCOperand::createImm(Imm));
2595   }
2596 
2597   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
2598     assert(N == 1 && "Invalid number of operands!");
2599     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2600     assert(CE && "non-constant post-idx-imm8s4 operand!");
2601     int Imm = CE->getValue();
2602     bool isAdd = Imm >= 0;
2603     if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
2604     // Immediate is scaled by 4.
2605     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
2606     Inst.addOperand(MCOperand::createImm(Imm));
2607   }
2608 
2609   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
2610     assert(N == 2 && "Invalid number of operands!");
2611     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2612     Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
2613   }
2614 
2615   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
2616     assert(N == 2 && "Invalid number of operands!");
2617     Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
2618     // The sign, shift type, and shift amount are encoded in a single operand
2619     // using the AM2 encoding helpers.
2620     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
2621     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
2622                                      PostIdxReg.ShiftTy);
2623     Inst.addOperand(MCOperand::createImm(Imm));
2624   }
2625 
2626   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
2627     assert(N == 1 && "Invalid number of operands!");
2628     Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
2629   }
2630 
2631   void addBankedRegOperands(MCInst &Inst, unsigned N) const {
2632     assert(N == 1 && "Invalid number of operands!");
2633     Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
2634   }
2635 
2636   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
2637     assert(N == 1 && "Invalid number of operands!");
2638     Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
2639   }
2640 
2641   void addVecListOperands(MCInst &Inst, unsigned N) const {
2642     assert(N == 1 && "Invalid number of operands!");
2643     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2644   }
2645 
2646   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
2647     assert(N == 2 && "Invalid number of operands!");
2648     Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
2649     Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
2650   }
2651 
2652   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
2653     assert(N == 1 && "Invalid number of operands!");
2654     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2655   }
2656 
2657   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
2658     assert(N == 1 && "Invalid number of operands!");
2659     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2660   }
2661 
2662   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
2663     assert(N == 1 && "Invalid number of operands!");
2664     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2665   }
2666 
2667   void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
2668     assert(N == 1 && "Invalid number of operands!");
2669     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
2670   }
2671 
2672   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2673     assert(N == 1 && "Invalid number of operands!");
2674     // The immediate encodes the type of constant as well as the value.
2675     // Mask in that this is an i8 splat.
2676     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2677     Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
2678   }
2679 
2680   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2681     assert(N == 1 && "Invalid number of operands!");
2682     // The immediate encodes the type of constant as well as the value.
2683     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2684     unsigned Value = CE->getValue();
2685     Value = ARM_AM::encodeNEONi16splat(Value);
2686     Inst.addOperand(MCOperand::createImm(Value));
2687   }
2688 
2689   void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
2690     assert(N == 1 && "Invalid number of operands!");
2691     // The immediate encodes the type of constant as well as the value.
2692     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2693     unsigned Value = CE->getValue();
2694     Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
2695     Inst.addOperand(MCOperand::createImm(Value));
2696   }
2697 
2698   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2699     assert(N == 1 && "Invalid number of operands!");
2700     // The immediate encodes the type of constant as well as the value.
2701     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2702     unsigned Value = CE->getValue();
2703     Value = ARM_AM::encodeNEONi32splat(Value);
2704     Inst.addOperand(MCOperand::createImm(Value));
2705   }
2706 
2707   void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
2708     assert(N == 1 && "Invalid number of operands!");
2709     // The immediate encodes the type of constant as well as the value.
2710     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2711     unsigned Value = CE->getValue();
2712     Value = ARM_AM::encodeNEONi32splat(~Value);
2713     Inst.addOperand(MCOperand::createImm(Value));
2714   }
2715 
2716   void addNEONinvByteReplicateOperands(MCInst &Inst, unsigned N) const {
2717     assert(N == 1 && "Invalid number of operands!");
2718     // The immediate encodes the type of constant as well as the value.
2719     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2720     unsigned Value = CE->getValue();
2721     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2722             Inst.getOpcode() == ARM::VMOVv16i8) &&
2723            "All vmvn instructions that wants to replicate non-zero byte "
2724            "always must be replaced with VMOVv8i8 or VMOVv16i8.");
2725     unsigned B = ((~Value) & 0xff);
2726     B |= 0xe00; // cmode = 0b1110
2727     Inst.addOperand(MCOperand::createImm(B));
2728   }
2729 
2730   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2731     assert(N == 1 && "Invalid number of operands!");
2732     // The immediate encodes the type of constant as well as the value.
2733     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2734     unsigned Value = CE->getValue();
2735     if (Value >= 256 && Value <= 0xffff)
2736       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2737     else if (Value > 0xffff && Value <= 0xffffff)
2738       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2739     else if (Value > 0xffffff)
2740       Value = (Value >> 24) | 0x600;
2741     Inst.addOperand(MCOperand::createImm(Value));
2742   }
2743 
2744   void addNEONvmovByteReplicateOperands(MCInst &Inst, unsigned N) const {
2745     assert(N == 1 && "Invalid number of operands!");
2746     // The immediate encodes the type of constant as well as the value.
2747     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2748     unsigned Value = CE->getValue();
2749     assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
2750             Inst.getOpcode() == ARM::VMOVv16i8) &&
2751            "All instructions that wants to replicate non-zero byte "
2752            "always must be replaced with VMOVv8i8 or VMOVv16i8.");
2753     unsigned B = Value & 0xff;
2754     B |= 0xe00; // cmode = 0b1110
2755     Inst.addOperand(MCOperand::createImm(B));
2756   }
2757 
2758   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2759     assert(N == 1 && "Invalid number of operands!");
2760     // The immediate encodes the type of constant as well as the value.
2761     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2762     unsigned Value = ~CE->getValue();
2763     if (Value >= 256 && Value <= 0xffff)
2764       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2765     else if (Value > 0xffff && Value <= 0xffffff)
2766       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2767     else if (Value > 0xffffff)
2768       Value = (Value >> 24) | 0x600;
2769     Inst.addOperand(MCOperand::createImm(Value));
2770   }
2771 
2772   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2773     assert(N == 1 && "Invalid number of operands!");
2774     // The immediate encodes the type of constant as well as the value.
2775     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2776     uint64_t Value = CE->getValue();
2777     unsigned Imm = 0;
2778     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2779       Imm |= (Value & 1) << i;
2780     }
2781     Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
2782   }
2783 
2784   void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2785     assert(N == 1 && "Invalid number of operands!");
2786     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2787     Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
2788   }
2789 
2790   void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2791     assert(N == 1 && "Invalid number of operands!");
2792     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2793     Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
2794   }
2795 
2796   void print(raw_ostream &OS) const override;
2797 
2798   static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
2799     auto Op = make_unique<ARMOperand>(k_ITCondMask);
2800     Op->ITMask.Mask = Mask;
2801     Op->StartLoc = S;
2802     Op->EndLoc = S;
2803     return Op;
2804   }
2805 
2806   static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
2807                                                     SMLoc S) {
2808     auto Op = make_unique<ARMOperand>(k_CondCode);
2809     Op->CC.Val = CC;
2810     Op->StartLoc = S;
2811     Op->EndLoc = S;
2812     return Op;
2813   }
2814 
2815   static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
2816     auto Op = make_unique<ARMOperand>(k_CoprocNum);
2817     Op->Cop.Val = CopVal;
2818     Op->StartLoc = S;
2819     Op->EndLoc = S;
2820     return Op;
2821   }
2822 
2823   static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
2824     auto Op = make_unique<ARMOperand>(k_CoprocReg);
2825     Op->Cop.Val = CopVal;
2826     Op->StartLoc = S;
2827     Op->EndLoc = S;
2828     return Op;
2829   }
2830 
2831   static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
2832                                                         SMLoc E) {
2833     auto Op = make_unique<ARMOperand>(k_CoprocOption);
2834     Op->Cop.Val = Val;
2835     Op->StartLoc = S;
2836     Op->EndLoc = E;
2837     return Op;
2838   }
2839 
2840   static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
2841     auto Op = make_unique<ARMOperand>(k_CCOut);
2842     Op->Reg.RegNum = RegNum;
2843     Op->StartLoc = S;
2844     Op->EndLoc = S;
2845     return Op;
2846   }
2847 
2848   static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
2849     auto Op = make_unique<ARMOperand>(k_Token);
2850     Op->Tok.Data = Str.data();
2851     Op->Tok.Length = Str.size();
2852     Op->StartLoc = S;
2853     Op->EndLoc = S;
2854     return Op;
2855   }
2856 
2857   static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
2858                                                SMLoc E) {
2859     auto Op = make_unique<ARMOperand>(k_Register);
2860     Op->Reg.RegNum = RegNum;
2861     Op->StartLoc = S;
2862     Op->EndLoc = E;
2863     return Op;
2864   }
2865 
2866   static std::unique_ptr<ARMOperand>
2867   CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2868                         unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
2869                         SMLoc E) {
2870     auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
2871     Op->RegShiftedReg.ShiftTy = ShTy;
2872     Op->RegShiftedReg.SrcReg = SrcReg;
2873     Op->RegShiftedReg.ShiftReg = ShiftReg;
2874     Op->RegShiftedReg.ShiftImm = ShiftImm;
2875     Op->StartLoc = S;
2876     Op->EndLoc = E;
2877     return Op;
2878   }
2879 
2880   static std::unique_ptr<ARMOperand>
2881   CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
2882                          unsigned ShiftImm, SMLoc S, SMLoc E) {
2883     auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
2884     Op->RegShiftedImm.ShiftTy = ShTy;
2885     Op->RegShiftedImm.SrcReg = SrcReg;
2886     Op->RegShiftedImm.ShiftImm = ShiftImm;
2887     Op->StartLoc = S;
2888     Op->EndLoc = E;
2889     return Op;
2890   }
2891 
2892   static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
2893                                                       SMLoc S, SMLoc E) {
2894     auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
2895     Op->ShifterImm.isASR = isASR;
2896     Op->ShifterImm.Imm = Imm;
2897     Op->StartLoc = S;
2898     Op->EndLoc = E;
2899     return Op;
2900   }
2901 
2902   static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
2903                                                   SMLoc E) {
2904     auto Op = make_unique<ARMOperand>(k_RotateImmediate);
2905     Op->RotImm.Imm = Imm;
2906     Op->StartLoc = S;
2907     Op->EndLoc = E;
2908     return Op;
2909   }
2910 
2911   static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
2912                                                   SMLoc S, SMLoc E) {
2913     auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
2914     Op->ModImm.Bits = Bits;
2915     Op->ModImm.Rot = Rot;
2916     Op->StartLoc = S;
2917     Op->EndLoc = E;
2918     return Op;
2919   }
2920 
2921   static std::unique_ptr<ARMOperand>
2922   CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2923     auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
2924     Op->Imm.Val = Val;
2925     Op->StartLoc = S;
2926     Op->EndLoc = E;
2927     return Op;
2928   }
2929 
2930   static std::unique_ptr<ARMOperand>
2931   CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
2932     auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
2933     Op->Bitfield.LSB = LSB;
2934     Op->Bitfield.Width = Width;
2935     Op->StartLoc = S;
2936     Op->EndLoc = E;
2937     return Op;
2938   }
2939 
2940   static std::unique_ptr<ARMOperand>
2941   CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
2942                 SMLoc StartLoc, SMLoc EndLoc) {
2943     assert(Regs.size() > 0 && "RegList contains no registers?");
2944     KindTy Kind = k_RegisterList;
2945 
2946     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
2947       Kind = k_DPRRegisterList;
2948     else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2949              contains(Regs.front().second))
2950       Kind = k_SPRRegisterList;
2951 
2952     // Sort based on the register encoding values.
2953     array_pod_sort(Regs.begin(), Regs.end());
2954 
2955     auto Op = make_unique<ARMOperand>(Kind);
2956     for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
2957            I = Regs.begin(), E = Regs.end(); I != E; ++I)
2958       Op->Registers.push_back(I->second);
2959     Op->StartLoc = StartLoc;
2960     Op->EndLoc = EndLoc;
2961     return Op;
2962   }
2963 
2964   static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
2965                                                       unsigned Count,
2966                                                       bool isDoubleSpaced,
2967                                                       SMLoc S, SMLoc E) {
2968     auto Op = make_unique<ARMOperand>(k_VectorList);
2969     Op->VectorList.RegNum = RegNum;
2970     Op->VectorList.Count = Count;
2971     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2972     Op->StartLoc = S;
2973     Op->EndLoc = E;
2974     return Op;
2975   }
2976 
2977   static std::unique_ptr<ARMOperand>
2978   CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
2979                            SMLoc S, SMLoc E) {
2980     auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
2981     Op->VectorList.RegNum = RegNum;
2982     Op->VectorList.Count = Count;
2983     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2984     Op->StartLoc = S;
2985     Op->EndLoc = E;
2986     return Op;
2987   }
2988 
2989   static std::unique_ptr<ARMOperand>
2990   CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
2991                           bool isDoubleSpaced, SMLoc S, SMLoc E) {
2992     auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
2993     Op->VectorList.RegNum = RegNum;
2994     Op->VectorList.Count = Count;
2995     Op->VectorList.LaneIndex = Index;
2996     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2997     Op->StartLoc = S;
2998     Op->EndLoc = E;
2999     return Op;
3000   }
3001 
3002   static std::unique_ptr<ARMOperand>
3003   CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
3004     auto Op = make_unique<ARMOperand>(k_VectorIndex);
3005     Op->VectorIndex.Val = Idx;
3006     Op->StartLoc = S;
3007     Op->EndLoc = E;
3008     return Op;
3009   }
3010 
3011   static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
3012                                                SMLoc E) {
3013     auto Op = make_unique<ARMOperand>(k_Immediate);
3014     Op->Imm.Val = Val;
3015     Op->StartLoc = S;
3016     Op->EndLoc = E;
3017     return Op;
3018   }
3019 
3020   static std::unique_ptr<ARMOperand>
3021   CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
3022             unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
3023             unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
3024             SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
3025     auto Op = make_unique<ARMOperand>(k_Memory);
3026     Op->Memory.BaseRegNum = BaseRegNum;
3027     Op->Memory.OffsetImm = OffsetImm;
3028     Op->Memory.OffsetRegNum = OffsetRegNum;
3029     Op->Memory.ShiftType = ShiftType;
3030     Op->Memory.ShiftImm = ShiftImm;
3031     Op->Memory.Alignment = Alignment;
3032     Op->Memory.isNegative = isNegative;
3033     Op->StartLoc = S;
3034     Op->EndLoc = E;
3035     Op->AlignmentLoc = AlignmentLoc;
3036     return Op;
3037   }
3038 
3039   static std::unique_ptr<ARMOperand>
3040   CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
3041                    unsigned ShiftImm, SMLoc S, SMLoc E) {
3042     auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
3043     Op->PostIdxReg.RegNum = RegNum;
3044     Op->PostIdxReg.isAdd = isAdd;
3045     Op->PostIdxReg.ShiftTy = ShiftTy;
3046     Op->PostIdxReg.ShiftImm = ShiftImm;
3047     Op->StartLoc = S;
3048     Op->EndLoc = E;
3049     return Op;
3050   }
3051 
3052   static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
3053                                                          SMLoc S) {
3054     auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
3055     Op->MBOpt.Val = Opt;
3056     Op->StartLoc = S;
3057     Op->EndLoc = S;
3058     return Op;
3059   }
3060 
3061   static std::unique_ptr<ARMOperand>
3062   CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
3063     auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
3064     Op->ISBOpt.Val = Opt;
3065     Op->StartLoc = S;
3066     Op->EndLoc = S;
3067     return Op;
3068   }
3069 
3070   static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
3071                                                       SMLoc S) {
3072     auto Op = make_unique<ARMOperand>(k_ProcIFlags);
3073     Op->IFlags.Val = IFlags;
3074     Op->StartLoc = S;
3075     Op->EndLoc = S;
3076     return Op;
3077   }
3078 
3079   static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
3080     auto Op = make_unique<ARMOperand>(k_MSRMask);
3081     Op->MMask.Val = MMask;
3082     Op->StartLoc = S;
3083     Op->EndLoc = S;
3084     return Op;
3085   }
3086 
3087   static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
3088     auto Op = make_unique<ARMOperand>(k_BankedReg);
3089     Op->BankedReg.Val = Reg;
3090     Op->StartLoc = S;
3091     Op->EndLoc = S;
3092     return Op;
3093   }
3094 };
3095 
3096 } // end anonymous namespace.
3097 
3098 void ARMOperand::print(raw_ostream &OS) const {
3099   switch (Kind) {
3100   case k_CondCode:
3101     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
3102     break;
3103   case k_CCOut:
3104     OS << "<ccout " << getReg() << ">";
3105     break;
3106   case k_ITCondMask: {
3107     static const char *const MaskStr[] = {
3108       "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
3109       "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
3110     };
3111     assert((ITMask.Mask & 0xf) == ITMask.Mask);
3112     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
3113     break;
3114   }
3115   case k_CoprocNum:
3116     OS << "<coprocessor number: " << getCoproc() << ">";
3117     break;
3118   case k_CoprocReg:
3119     OS << "<coprocessor register: " << getCoproc() << ">";
3120     break;
3121   case k_CoprocOption:
3122     OS << "<coprocessor option: " << CoprocOption.Val << ">";
3123     break;
3124   case k_MSRMask:
3125     OS << "<mask: " << getMSRMask() << ">";
3126     break;
3127   case k_BankedReg:
3128     OS << "<banked reg: " << getBankedReg() << ">";
3129     break;
3130   case k_Immediate:
3131     OS << *getImm();
3132     break;
3133   case k_MemBarrierOpt:
3134     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
3135     break;
3136   case k_InstSyncBarrierOpt:
3137     OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
3138     break;
3139   case k_Memory:
3140     OS << "<memory "
3141        << " base:" << Memory.BaseRegNum;
3142     OS << ">";
3143     break;
3144   case k_PostIndexRegister:
3145     OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
3146        << PostIdxReg.RegNum;
3147     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
3148       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
3149          << PostIdxReg.ShiftImm;
3150     OS << ">";
3151     break;
3152   case k_ProcIFlags: {
3153     OS << "<ARM_PROC::";
3154     unsigned IFlags = getProcIFlags();
3155     for (int i=2; i >= 0; --i)
3156       if (IFlags & (1 << i))
3157         OS << ARM_PROC::IFlagsToString(1 << i);
3158     OS << ">";
3159     break;
3160   }
3161   case k_Register:
3162     OS << "<register " << getReg() << ">";
3163     break;
3164   case k_ShifterImmediate:
3165     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
3166        << " #" << ShifterImm.Imm << ">";
3167     break;
3168   case k_ShiftedRegister:
3169     OS << "<so_reg_reg "
3170        << RegShiftedReg.SrcReg << " "
3171        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
3172        << " " << RegShiftedReg.ShiftReg << ">";
3173     break;
3174   case k_ShiftedImmediate:
3175     OS << "<so_reg_imm "
3176        << RegShiftedImm.SrcReg << " "
3177        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
3178        << " #" << RegShiftedImm.ShiftImm << ">";
3179     break;
3180   case k_RotateImmediate:
3181     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
3182     break;
3183   case k_ModifiedImmediate:
3184     OS << "<mod_imm #" << ModImm.Bits << ", #"
3185        <<  ModImm.Rot << ")>";
3186     break;
3187   case k_ConstantPoolImmediate:
3188     OS << "<constant_pool_imm #" << *getConstantPoolImm();
3189     break;
3190   case k_BitfieldDescriptor:
3191     OS << "<bitfield " << "lsb: " << Bitfield.LSB
3192        << ", width: " << Bitfield.Width << ">";
3193     break;
3194   case k_RegisterList:
3195   case k_DPRRegisterList:
3196   case k_SPRRegisterList: {
3197     OS << "<register_list ";
3198 
3199     const SmallVectorImpl<unsigned> &RegList = getRegList();
3200     for (SmallVectorImpl<unsigned>::const_iterator
3201            I = RegList.begin(), E = RegList.end(); I != E; ) {
3202       OS << *I;
3203       if (++I < E) OS << ", ";
3204     }
3205 
3206     OS << ">";
3207     break;
3208   }
3209   case k_VectorList:
3210     OS << "<vector_list " << VectorList.Count << " * "
3211        << VectorList.RegNum << ">";
3212     break;
3213   case k_VectorListAllLanes:
3214     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
3215        << VectorList.RegNum << ">";
3216     break;
3217   case k_VectorListIndexed:
3218     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
3219        << VectorList.Count << " * " << VectorList.RegNum << ">";
3220     break;
3221   case k_Token:
3222     OS << "'" << getToken() << "'";
3223     break;
3224   case k_VectorIndex:
3225     OS << "<vectorindex " << getVectorIndex() << ">";
3226     break;
3227   }
3228 }
3229 
3230 /// @name Auto-generated Match Functions
3231 /// {
3232 
3233 static unsigned MatchRegisterName(StringRef Name);
3234 
3235 /// }
3236 
3237 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
3238                                  SMLoc &StartLoc, SMLoc &EndLoc) {
3239   const AsmToken &Tok = getParser().getTok();
3240   StartLoc = Tok.getLoc();
3241   EndLoc = Tok.getEndLoc();
3242   RegNo = tryParseRegister();
3243 
3244   return (RegNo == (unsigned)-1);
3245 }
3246 
3247 /// Try to parse a register name.  The token must be an Identifier when called,
3248 /// and if it is a register name the token is eaten and the register number is
3249 /// returned.  Otherwise return -1.
int ARMAsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  // Only identifiers can be register names; leave the token unconsumed
  // otherwise.
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case insensitive; canonicalize before matching.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Not a canonical name; try the numeric and APCS aliases.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid.
  // Note: this rejection happens before the Lex below, so the identifier
  // token is NOT consumed on this failure path.
  if (hasD16() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
    return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
3301 
// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
//
// On success, the register operand previously pushed onto Operands is
// replaced by a single shifted-register or shifted-immediate operand.
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Shift operator names are matched case-insensitively; "asl" is accepted
  // as a synonym for "lsl" ('gas' compatibility).
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this yields 1 ("recoverable")
    // even though the shift-operator token has already been consumed;
    // confirm callers tolerate that.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Register-specified shift amount (e.g. "lsl r3").
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit a register-shifted-register operand when a shift register was
  // parsed; rrx and immediate shifts use the shifted-immediate form.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}
3402 
3403 /// Try to parse a register name.  The token must be an Identifier when called.
3404 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
3405 /// if there is a "writeback". 'true' if it's not a register.
3406 ///
3407 /// TODO this is likely to change to allow different register types and or to
3408 /// parse for a specific register type.
3409 bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
3410   MCAsmParser &Parser = getParser();
3411   SMLoc RegStartLoc = Parser.getTok().getLoc();
3412   SMLoc RegEndLoc = Parser.getTok().getEndLoc();
3413   int RegNo = tryParseRegister();
3414   if (RegNo == -1)
3415     return true;
3416 
3417   Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));
3418 
3419   const AsmToken &ExclaimTok = Parser.getTok();
3420   if (ExclaimTok.is(AsmToken::Exclaim)) {
3421     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
3422                                                ExclaimTok.getLoc()));
3423     Parser.Lex(); // Eat exclaim token
3424     return false;
3425   }
3426 
3427   // Also check for an index operand. This is only legal for vector registers,
3428   // but that'll get caught OK in operand matching, so we don't need to
3429   // explicitly filter everything else out here.
3430   if (Parser.getTok().is(AsmToken::LBrac)) {
3431     SMLoc SIdx = Parser.getTok().getLoc();
3432     Parser.Lex(); // Eat left bracket token.
3433 
3434     const MCExpr *ImmVal;
3435     if (getParser().parseExpression(ImmVal))
3436       return true;
3437     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3438     if (!MCE)
3439       return TokError("immediate value expected for vector index");
3440 
3441     if (Parser.getTok().isNot(AsmToken::RBrac))
3442       return Error(Parser.getTok().getLoc(), "']' expected");
3443 
3444     SMLoc E = Parser.getTok().getEndLoc();
3445     Parser.Lex(); // Eat right bracket token.
3446 
3447     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
3448                                                      SIdx, E,
3449                                                      getContext()));
3450   }
3451 
3452   return false;
3453 }
3454 
3455 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
3456 /// instruction with a symbolic operand name.
3457 /// We accept "crN" syntax for GAS compatibility.
3458 /// <operand-name> ::= <prefix><number>
3459 /// If CoprocOp is 'c', then:
3460 ///   <prefix> ::= c | cr
3461 /// If CoprocOp is 'p', then :
3462 ///   <prefix> ::= p
3463 /// <number> ::= integer in range [0, 15]
3464 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
3465   // Use the same layout as the tablegen'erated register name matcher. Ugly,
3466   // but efficient.
3467   if (Name.size() < 2 || Name[0] != CoprocOp)
3468     return -1;
3469   Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();
3470 
3471   switch (Name.size()) {
3472   default: return -1;
3473   case 1:
3474     switch (Name[0]) {
3475     default:  return -1;
3476     case '0': return 0;
3477     case '1': return 1;
3478     case '2': return 2;
3479     case '3': return 3;
3480     case '4': return 4;
3481     case '5': return 5;
3482     case '6': return 6;
3483     case '7': return 7;
3484     case '8': return 8;
3485     case '9': return 9;
3486     }
3487   case 2:
3488     if (Name[0] != '1')
3489       return -1;
3490     switch (Name[1]) {
3491     default:  return -1;
3492     // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
3493     // However, old cores (v5/v6) did use them in that way.
3494     case '0': return 10;
3495     case '1': return 11;
3496     case '2': return 12;
3497     case '3': return 13;
3498     case '4': return 14;
3499     case '5': return 15;
3500     }
3501   }
3502 }
3503 
3504 /// parseITCondCode - Try to parse a condition code for an IT instruction.
3505 OperandMatchResultTy
3506 ARMAsmParser::parseITCondCode(OperandVector &Operands) {
3507   MCAsmParser &Parser = getParser();
3508   SMLoc S = Parser.getTok().getLoc();
3509   const AsmToken &Tok = Parser.getTok();
3510   if (!Tok.is(AsmToken::Identifier))
3511     return MatchOperand_NoMatch;
3512   unsigned CC = ARMCondCodeFromString(Tok.getString());
3513   if (CC == ~0U)
3514     return MatchOperand_NoMatch;
3515   Parser.Lex(); // Eat the token.
3516 
3517   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
3518 
3519   return MatchOperand_Success;
3520 }
3521 
3522 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
3523 /// token must be an Identifier when called, and if it is a coprocessor
3524 /// number, the token is eaten and the operand is added to the operand list.
3525 OperandMatchResultTy
3526 ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
3527   MCAsmParser &Parser = getParser();
3528   SMLoc S = Parser.getTok().getLoc();
3529   const AsmToken &Tok = Parser.getTok();
3530   if (Tok.isNot(AsmToken::Identifier))
3531     return MatchOperand_NoMatch;
3532 
3533   int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
3534   if (Num == -1)
3535     return MatchOperand_NoMatch;
3536   // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
3537   if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
3538     return MatchOperand_NoMatch;
3539 
3540   Parser.Lex(); // Eat identifier token.
3541   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
3542   return MatchOperand_Success;
3543 }
3544 
3545 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
3546 /// token must be an Identifier when called, and if it is a coprocessor
3547 /// number, the token is eaten and the operand is added to the operand list.
3548 OperandMatchResultTy
3549 ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
3550   MCAsmParser &Parser = getParser();
3551   SMLoc S = Parser.getTok().getLoc();
3552   const AsmToken &Tok = Parser.getTok();
3553   if (Tok.isNot(AsmToken::Identifier))
3554     return MatchOperand_NoMatch;
3555 
3556   int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
3557   if (Reg == -1)
3558     return MatchOperand_NoMatch;
3559 
3560   Parser.Lex(); // Eat identifier token.
3561   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
3562   return MatchOperand_Success;
3563 }
3564 
3565 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
3566 /// coproc_option : '{' imm0_255 '}'
3567 OperandMatchResultTy
3568 ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
3569   MCAsmParser &Parser = getParser();
3570   SMLoc S = Parser.getTok().getLoc();
3571 
3572   // If this isn't a '{', this isn't a coprocessor immediate operand.
3573   if (Parser.getTok().isNot(AsmToken::LCurly))
3574     return MatchOperand_NoMatch;
3575   Parser.Lex(); // Eat the '{'
3576 
3577   const MCExpr *Expr;
3578   SMLoc Loc = Parser.getTok().getLoc();
3579   if (getParser().parseExpression(Expr)) {
3580     Error(Loc, "illegal expression");
3581     return MatchOperand_ParseFail;
3582   }
3583   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3584   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
3585     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
3586     return MatchOperand_ParseFail;
3587   }
3588   int Val = CE->getValue();
3589 
3590   // Check for and consume the closing '}'
3591   if (Parser.getTok().isNot(AsmToken::RCurly))
3592     return MatchOperand_ParseFail;
3593   SMLoc E = Parser.getTok().getEndLoc();
3594   Parser.Lex(); // Eat the '}'
3595 
3596   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
3597   return MatchOperand_Success;
3598 }
3599 
3600 // For register list parsing, we need to map from raw GPR register numbering
3601 // to the enumeration values. The enumeration values aren't sorted by
3602 // register number due to our using "sp", "lr" and "pc" as canonical names.
3603 static unsigned getNextRegister(unsigned Reg) {
3604   // If this is a GPR, we need to do it manually, otherwise we can rely
3605   // on the sort ordering of the enumeration since the other reg-classes
3606   // are sane.
3607   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
3608     return Reg + 1;
3609   switch(Reg) {
3610   default: llvm_unreachable("Invalid GPR number!");
3611   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
3612   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
3613   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
3614   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
3615   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
3616   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
3617   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
3618   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
3619   }
3620 }
3621 
/// Parse a register list.
///
/// reglist ::= '{' reg (',' reg | '-' reg)* '}' ('^')?
///
/// The register class of the list (GPR, DPR or SPR) is determined by the
/// first register parsed; Q registers are accepted and expanded into their
/// two D sub-registers. On success a register-list operand (and a '^' token
/// operand, if present) is appended to Operands. Returns true on error.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return TokError("Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  int EReg = 0;
  // Each entry is (encoding value, register); the encoding value is kept so
  // ordering checks below compare hardware numbering, not enum ordering.
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    ++Reg;
  }
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: <Reg>-<EndReg>.
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    // Out-of-order GPR lists are only a warning (the encoding can express
    // them); for VFP lists it is a hard error.
    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else
        return Error(RegLoc, "register list not in ascending order");
    }
    // A duplicate is only warned about and then skipped.
    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    // A Q register contributes its second D sub-register as well.
    if (isQReg) {
      EReg = MRI->getEncodingValue(++Reg);
      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
3752 
3753 // Helper function to parse the lane index for vector lists.
3754 OperandMatchResultTy ARMAsmParser::
3755 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
3756   MCAsmParser &Parser = getParser();
3757   Index = 0; // Always return a defined index value.
3758   if (Parser.getTok().is(AsmToken::LBrac)) {
3759     Parser.Lex(); // Eat the '['.
3760     if (Parser.getTok().is(AsmToken::RBrac)) {
3761       // "Dn[]" is the 'all lanes' syntax.
3762       LaneKind = AllLanes;
3763       EndLoc = Parser.getTok().getEndLoc();
3764       Parser.Lex(); // Eat the ']'.
3765       return MatchOperand_Success;
3766     }
3767 
3768     // There's an optional '#' token here. Normally there wouldn't be, but
3769     // inline assemble puts one in, and it's friendly to accept that.
3770     if (Parser.getTok().is(AsmToken::Hash))
3771       Parser.Lex(); // Eat '#' or '$'.
3772 
3773     const MCExpr *LaneIndex;
3774     SMLoc Loc = Parser.getTok().getLoc();
3775     if (getParser().parseExpression(LaneIndex)) {
3776       Error(Loc, "illegal expression");
3777       return MatchOperand_ParseFail;
3778     }
3779     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3780     if (!CE) {
3781       Error(Loc, "lane index must be empty or an integer");
3782       return MatchOperand_ParseFail;
3783     }
3784     if (Parser.getTok().isNot(AsmToken::RBrac)) {
3785       Error(Parser.getTok().getLoc(), "']' expected");
3786       return MatchOperand_ParseFail;
3787     }
3788     EndLoc = Parser.getTok().getEndLoc();
3789     Parser.Lex(); // Eat the ']'.
3790     int64_t Val = CE->getValue();
3791 
3792     // FIXME: Make this range check context sensitive for .8, .16, .32.
3793     if (Val < 0 || Val > 7) {
3794       Error(Parser.getTok().getLoc(), "lane index out of range");
3795       return MatchOperand_ParseFail;
3796     }
3797     Index = Val;
3798     LaneKind = IndexedLane;
3799     return MatchOperand_Success;
3800   }
3801   LaneKind = NoLanes;
3802   return MatchOperand_Success;
3803 }
3804 
// parse a vector register list
//
// Accepts a bare D or Q register (gas extension) or a braced list of D/Q
// registers, optionally with a lane specifier ("[]" or "[n]") on each
// element. All elements must agree on the lane kind and index. The result
// is a single vector-list operand (NoLanes / AllLanes / IndexedLane form)
// recording the first register, the element count, and whether the list is
// double-spaced.
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      // Bare D register: a one-element list.
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // Bare Q register: a two-element list of its D sub-registers.
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Represent the pair with the composite DPair register class.
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  // Spacing: 0 = not yet known, 1 = single-spaced, 2 = double-spaced.
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: <Reg>-<EndReg>. A range forces single spacing.
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      // Every element of the list must use the same lane form.
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
4055 
4056 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
4057 OperandMatchResultTy
4058 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) {
4059   MCAsmParser &Parser = getParser();
4060   SMLoc S = Parser.getTok().getLoc();
4061   const AsmToken &Tok = Parser.getTok();
4062   unsigned Opt;
4063 
4064   if (Tok.is(AsmToken::Identifier)) {
4065     StringRef OptStr = Tok.getString();
4066 
4067     Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
4068       .Case("sy",    ARM_MB::SY)
4069       .Case("st",    ARM_MB::ST)
4070       .Case("ld",    ARM_MB::LD)
4071       .Case("sh",    ARM_MB::ISH)
4072       .Case("ish",   ARM_MB::ISH)
4073       .Case("shst",  ARM_MB::ISHST)
4074       .Case("ishst", ARM_MB::ISHST)
4075       .Case("ishld", ARM_MB::ISHLD)
4076       .Case("nsh",   ARM_MB::NSH)
4077       .Case("un",    ARM_MB::NSH)
4078       .Case("nshst", ARM_MB::NSHST)
4079       .Case("nshld", ARM_MB::NSHLD)
4080       .Case("unst",  ARM_MB::NSHST)
4081       .Case("osh",   ARM_MB::OSH)
4082       .Case("oshst", ARM_MB::OSHST)
4083       .Case("oshld", ARM_MB::OSHLD)
4084       .Default(~0U);
4085 
4086     // ishld, oshld, nshld and ld are only available from ARMv8.
4087     if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
4088                         Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
4089       Opt = ~0U;
4090 
4091     if (Opt == ~0U)
4092       return MatchOperand_NoMatch;
4093 
4094     Parser.Lex(); // Eat identifier token.
4095   } else if (Tok.is(AsmToken::Hash) ||
4096              Tok.is(AsmToken::Dollar) ||
4097              Tok.is(AsmToken::Integer)) {
4098     if (Parser.getTok().isNot(AsmToken::Integer))
4099       Parser.Lex(); // Eat '#' or '$'.
4100     SMLoc Loc = Parser.getTok().getLoc();
4101 
4102     const MCExpr *MemBarrierID;
4103     if (getParser().parseExpression(MemBarrierID)) {
4104       Error(Loc, "illegal expression");
4105       return MatchOperand_ParseFail;
4106     }
4107 
4108     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
4109     if (!CE) {
4110       Error(Loc, "constant expression expected");
4111       return MatchOperand_ParseFail;
4112     }
4113 
4114     int Val = CE->getValue();
4115     if (Val & ~0xf) {
4116       Error(Loc, "immediate value out of range");
4117       return MatchOperand_ParseFail;
4118     }
4119 
4120     Opt = ARM_MB::RESERVED_0 + Val;
4121   } else
4122     return MatchOperand_ParseFail;
4123 
4124   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
4125   return MatchOperand_Success;
4126 }
4127 
4128 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
4129 OperandMatchResultTy
4130 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) {
4131   MCAsmParser &Parser = getParser();
4132   SMLoc S = Parser.getTok().getLoc();
4133   const AsmToken &Tok = Parser.getTok();
4134   unsigned Opt;
4135 
4136   if (Tok.is(AsmToken::Identifier)) {
4137     StringRef OptStr = Tok.getString();
4138 
4139     if (OptStr.equals_lower("sy"))
4140       Opt = ARM_ISB::SY;
4141     else
4142       return MatchOperand_NoMatch;
4143 
4144     Parser.Lex(); // Eat identifier token.
4145   } else if (Tok.is(AsmToken::Hash) ||
4146              Tok.is(AsmToken::Dollar) ||
4147              Tok.is(AsmToken::Integer)) {
4148     if (Parser.getTok().isNot(AsmToken::Integer))
4149       Parser.Lex(); // Eat '#' or '$'.
4150     SMLoc Loc = Parser.getTok().getLoc();
4151 
4152     const MCExpr *ISBarrierID;
4153     if (getParser().parseExpression(ISBarrierID)) {
4154       Error(Loc, "illegal expression");
4155       return MatchOperand_ParseFail;
4156     }
4157 
4158     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
4159     if (!CE) {
4160       Error(Loc, "constant expression expected");
4161       return MatchOperand_ParseFail;
4162     }
4163 
4164     int Val = CE->getValue();
4165     if (Val & ~0xf) {
4166       Error(Loc, "immediate value out of range");
4167       return MatchOperand_ParseFail;
4168     }
4169 
4170     Opt = ARM_ISB::RESERVED_0 + Val;
4171   } else
4172     return MatchOperand_ParseFail;
4173 
4174   Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
4175           (ARM_ISB::InstSyncBOpt)Opt, S));
4176   return MatchOperand_Success;
4177 }
4178 
4179 
4180 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
4181 OperandMatchResultTy
4182 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) {
4183   MCAsmParser &Parser = getParser();
4184   SMLoc S = Parser.getTok().getLoc();
4185   const AsmToken &Tok = Parser.getTok();
4186   if (!Tok.is(AsmToken::Identifier))
4187     return MatchOperand_NoMatch;
4188   StringRef IFlagsStr = Tok.getString();
4189 
4190   // An iflags string of "none" is interpreted to mean that none of the AIF
4191   // bits are set.  Not a terribly useful instruction, but a valid encoding.
4192   unsigned IFlags = 0;
4193   if (IFlagsStr != "none") {
4194         for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
4195       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower())
4196         .Case("a", ARM_PROC::A)
4197         .Case("i", ARM_PROC::I)
4198         .Case("f", ARM_PROC::F)
4199         .Default(~0U);
4200 
4201       // If some specific iflag is already set, it means that some letter is
4202       // present more than once, this is not acceptable.
4203       if (Flag == ~0U || (IFlags & Flag))
4204         return MatchOperand_NoMatch;
4205 
4206       IFlags |= Flag;
4207     }
4208   }
4209 
4210   Parser.Lex(); // Eat identifier token.
4211   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
4212   return MatchOperand_Success;
4213 }
4214 
4215 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
OperandMatchResultTy
ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  // On M-class cores the whole identifier names a system register; look it
  // up (case-insensitively) in the tablegen'd table and check it is legal
  // for the current subtarget features.
  if (isMClass()) {
    auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower());
    if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits()))
      return MatchOperand_NoMatch;

    unsigned SYSmvalue = TheReg->Encoding & 0xFFF;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  // (if there is no '_', Next is npos and slice() takes the whole string,
  // leaving Flags empty).
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // APSR only accepts a fixed set of flag combinations (note: the flag
    // suffix is matched case-sensitively here).
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g",      0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      // A non-empty but unrecognized suffix is a non-match; plain "apsr"
      // defaults to the nzcvq (CPSR_f) encoding.
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Each letter of the suffix sets one mask bit: c=1, x=2, s=4, f=8.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
4299 
4300 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for
4301 /// use in the MRS/MSR instructions added to support virtualization.
4302 OperandMatchResultTy
4303 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) {
4304   MCAsmParser &Parser = getParser();
4305   SMLoc S = Parser.getTok().getLoc();
4306   const AsmToken &Tok = Parser.getTok();
4307   if (!Tok.is(AsmToken::Identifier))
4308     return MatchOperand_NoMatch;
4309   StringRef RegName = Tok.getString();
4310 
4311   auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower());
4312   if (!TheReg)
4313     return MatchOperand_NoMatch;
4314   unsigned Encoding = TheReg->Encoding;
4315 
4316   Parser.Lex(); // Eat identifier token.
4317   Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S));
4318   return MatchOperand_Success;
4319 }
4320 
4321 OperandMatchResultTy
4322 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low,
4323                           int High) {
4324   MCAsmParser &Parser = getParser();
4325   const AsmToken &Tok = Parser.getTok();
4326   if (Tok.isNot(AsmToken::Identifier)) {
4327     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4328     return MatchOperand_ParseFail;
4329   }
4330   StringRef ShiftName = Tok.getString();
4331   std::string LowerOp = Op.lower();
4332   std::string UpperOp = Op.upper();
4333   if (ShiftName != LowerOp && ShiftName != UpperOp) {
4334     Error(Parser.getTok().getLoc(), Op + " operand expected.");
4335     return MatchOperand_ParseFail;
4336   }
4337   Parser.Lex(); // Eat shift type token.
4338 
4339   // There must be a '#' and a shift amount.
4340   if (Parser.getTok().isNot(AsmToken::Hash) &&
4341       Parser.getTok().isNot(AsmToken::Dollar)) {
4342     Error(Parser.getTok().getLoc(), "'#' expected");
4343     return MatchOperand_ParseFail;
4344   }
4345   Parser.Lex(); // Eat hash token.
4346 
4347   const MCExpr *ShiftAmount;
4348   SMLoc Loc = Parser.getTok().getLoc();
4349   SMLoc EndLoc;
4350   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4351     Error(Loc, "illegal expression");
4352     return MatchOperand_ParseFail;
4353   }
4354   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4355   if (!CE) {
4356     Error(Loc, "constant expression expected");
4357     return MatchOperand_ParseFail;
4358   }
4359   int Val = CE->getValue();
4360   if (Val < Low || Val > High) {
4361     Error(Loc, "immediate value out of range");
4362     return MatchOperand_ParseFail;
4363   }
4364 
4365   Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
4366 
4367   return MatchOperand_Success;
4368 }
4369 
4370 OperandMatchResultTy
4371 ARMAsmParser::parseSetEndImm(OperandVector &Operands) {
4372   MCAsmParser &Parser = getParser();
4373   const AsmToken &Tok = Parser.getTok();
4374   SMLoc S = Tok.getLoc();
4375   if (Tok.isNot(AsmToken::Identifier)) {
4376     Error(S, "'be' or 'le' operand expected");
4377     return MatchOperand_ParseFail;
4378   }
4379   int Val = StringSwitch<int>(Tok.getString().lower())
4380     .Case("be", 1)
4381     .Case("le", 0)
4382     .Default(-1);
4383   Parser.Lex(); // Eat the token.
4384 
4385   if (Val == -1) {
4386     Error(S, "'be' or 'le' operand expected");
4387     return MatchOperand_ParseFail;
4388   }
4389   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val,
4390                                                                   getContext()),
4391                                            S, Tok.getEndLoc()));
4392   return MatchOperand_Success;
4393 }
4394 
4395 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
4396 /// instructions. Legal values are:
4397 ///     lsl #n  'n' in [0,31]
4398 ///     asr #n  'n' in [1,32]
4399 ///             n == 32 encoded as n == 0.
4400 OperandMatchResultTy
4401 ARMAsmParser::parseShifterImm(OperandVector &Operands) {
4402   MCAsmParser &Parser = getParser();
4403   const AsmToken &Tok = Parser.getTok();
4404   SMLoc S = Tok.getLoc();
4405   if (Tok.isNot(AsmToken::Identifier)) {
4406     Error(S, "shift operator 'asr' or 'lsl' expected");
4407     return MatchOperand_ParseFail;
4408   }
4409   StringRef ShiftName = Tok.getString();
4410   bool isASR;
4411   if (ShiftName == "lsl" || ShiftName == "LSL")
4412     isASR = false;
4413   else if (ShiftName == "asr" || ShiftName == "ASR")
4414     isASR = true;
4415   else {
4416     Error(S, "shift operator 'asr' or 'lsl' expected");
4417     return MatchOperand_ParseFail;
4418   }
4419   Parser.Lex(); // Eat the operator.
4420 
4421   // A '#' and a shift amount.
4422   if (Parser.getTok().isNot(AsmToken::Hash) &&
4423       Parser.getTok().isNot(AsmToken::Dollar)) {
4424     Error(Parser.getTok().getLoc(), "'#' expected");
4425     return MatchOperand_ParseFail;
4426   }
4427   Parser.Lex(); // Eat hash token.
4428   SMLoc ExLoc = Parser.getTok().getLoc();
4429 
4430   const MCExpr *ShiftAmount;
4431   SMLoc EndLoc;
4432   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4433     Error(ExLoc, "malformed shift expression");
4434     return MatchOperand_ParseFail;
4435   }
4436   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4437   if (!CE) {
4438     Error(ExLoc, "shift amount must be an immediate");
4439     return MatchOperand_ParseFail;
4440   }
4441 
4442   int64_t Val = CE->getValue();
4443   if (isASR) {
4444     // Shift amount must be in [1,32]
4445     if (Val < 1 || Val > 32) {
4446       Error(ExLoc, "'asr' shift amount must be in range [1,32]");
4447       return MatchOperand_ParseFail;
4448     }
4449     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
4450     if (isThumb() && Val == 32) {
4451       Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
4452       return MatchOperand_ParseFail;
4453     }
4454     if (Val == 32) Val = 0;
4455   } else {
4456     // Shift amount must be in [1,32]
4457     if (Val < 0 || Val > 31) {
4458       Error(ExLoc, "'lsr' shift amount must be in range [0,31]");
4459       return MatchOperand_ParseFail;
4460     }
4461   }
4462 
4463   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
4464 
4465   return MatchOperand_Success;
4466 }
4467 
4468 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
4469 /// of instructions. Legal values are:
4470 ///     ror #n  'n' in {0, 8, 16, 24}
4471 OperandMatchResultTy
4472 ARMAsmParser::parseRotImm(OperandVector &Operands) {
4473   MCAsmParser &Parser = getParser();
4474   const AsmToken &Tok = Parser.getTok();
4475   SMLoc S = Tok.getLoc();
4476   if (Tok.isNot(AsmToken::Identifier))
4477     return MatchOperand_NoMatch;
4478   StringRef ShiftName = Tok.getString();
4479   if (ShiftName != "ror" && ShiftName != "ROR")
4480     return MatchOperand_NoMatch;
4481   Parser.Lex(); // Eat the operator.
4482 
4483   // A '#' and a rotate amount.
4484   if (Parser.getTok().isNot(AsmToken::Hash) &&
4485       Parser.getTok().isNot(AsmToken::Dollar)) {
4486     Error(Parser.getTok().getLoc(), "'#' expected");
4487     return MatchOperand_ParseFail;
4488   }
4489   Parser.Lex(); // Eat hash token.
4490   SMLoc ExLoc = Parser.getTok().getLoc();
4491 
4492   const MCExpr *ShiftAmount;
4493   SMLoc EndLoc;
4494   if (getParser().parseExpression(ShiftAmount, EndLoc)) {
4495     Error(ExLoc, "malformed rotate expression");
4496     return MatchOperand_ParseFail;
4497   }
4498   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
4499   if (!CE) {
4500     Error(ExLoc, "rotate amount must be an immediate");
4501     return MatchOperand_ParseFail;
4502   }
4503 
4504   int64_t Val = CE->getValue();
4505   // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
4506   // normally, zero is represented in asm by omitting the rotate operand
4507   // entirely.
4508   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
4509     Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
4510     return MatchOperand_ParseFail;
4511   }
4512 
4513   Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
4514 
4515   return MatchOperand_Success;
4516 }
4517 
4518 OperandMatchResultTy
4519 ARMAsmParser::parseModImm(OperandVector &Operands) {
4520   MCAsmParser &Parser = getParser();
4521   MCAsmLexer &Lexer = getLexer();
4522   int64_t Imm1, Imm2;
4523 
4524   SMLoc S = Parser.getTok().getLoc();
4525 
4526   // 1) A mod_imm operand can appear in the place of a register name:
4527   //   add r0, #mod_imm
4528   //   add r0, r0, #mod_imm
4529   // to correctly handle the latter, we bail out as soon as we see an
4530   // identifier.
4531   //
4532   // 2) Similarly, we do not want to parse into complex operands:
4533   //   mov r0, #mod_imm
4534   //   mov r0, :lower16:(_foo)
4535   if (Parser.getTok().is(AsmToken::Identifier) ||
4536       Parser.getTok().is(AsmToken::Colon))
4537     return MatchOperand_NoMatch;
4538 
4539   // Hash (dollar) is optional as per the ARMARM
4540   if (Parser.getTok().is(AsmToken::Hash) ||
4541       Parser.getTok().is(AsmToken::Dollar)) {
4542     // Avoid parsing into complex operands (#:)
4543     if (Lexer.peekTok().is(AsmToken::Colon))
4544       return MatchOperand_NoMatch;
4545 
4546     // Eat the hash (dollar)
4547     Parser.Lex();
4548   }
4549 
4550   SMLoc Sx1, Ex1;
4551   Sx1 = Parser.getTok().getLoc();
4552   const MCExpr *Imm1Exp;
4553   if (getParser().parseExpression(Imm1Exp, Ex1)) {
4554     Error(Sx1, "malformed expression");
4555     return MatchOperand_ParseFail;
4556   }
4557 
4558   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp);
4559 
4560   if (CE) {
4561     // Immediate must fit within 32-bits
4562     Imm1 = CE->getValue();
4563     int Enc = ARM_AM::getSOImmVal(Imm1);
4564     if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) {
4565       // We have a match!
4566       Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF),
4567                                                   (Enc & 0xF00) >> 7,
4568                                                   Sx1, Ex1));
4569       return MatchOperand_Success;
4570     }
4571 
4572     // We have parsed an immediate which is not for us, fallback to a plain
4573     // immediate. This can happen for instruction aliases. For an example,
4574     // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform
4575     // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite
4576     // instruction with a mod_imm operand. The alias is defined such that the
4577     // parser method is shared, that's why we have to do this here.
4578     if (Parser.getTok().is(AsmToken::EndOfStatement)) {
4579       Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4580       return MatchOperand_Success;
4581     }
4582   } else {
4583     // Operands like #(l1 - l2) can only be evaluated at a later stage (via an
4584     // MCFixup). Fallback to a plain immediate.
4585     Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1));
4586     return MatchOperand_Success;
4587   }
4588 
4589   // From this point onward, we expect the input to be a (#bits, #rot) pair
4590   if (Parser.getTok().isNot(AsmToken::Comma)) {
4591     Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]");
4592     return MatchOperand_ParseFail;
4593   }
4594 
4595   if (Imm1 & ~0xFF) {
4596     Error(Sx1, "immediate operand must a number in the range [0, 255]");
4597     return MatchOperand_ParseFail;
4598   }
4599 
4600   // Eat the comma
4601   Parser.Lex();
4602 
4603   // Repeat for #rot
4604   SMLoc Sx2, Ex2;
4605   Sx2 = Parser.getTok().getLoc();
4606 
4607   // Eat the optional hash (dollar)
4608   if (Parser.getTok().is(AsmToken::Hash) ||
4609       Parser.getTok().is(AsmToken::Dollar))
4610     Parser.Lex();
4611 
4612   const MCExpr *Imm2Exp;
4613   if (getParser().parseExpression(Imm2Exp, Ex2)) {
4614     Error(Sx2, "malformed expression");
4615     return MatchOperand_ParseFail;
4616   }
4617 
4618   CE = dyn_cast<MCConstantExpr>(Imm2Exp);
4619 
4620   if (CE) {
4621     Imm2 = CE->getValue();
4622     if (!(Imm2 & ~0x1E)) {
4623       // We have a match!
4624       Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2));
4625       return MatchOperand_Success;
4626     }
4627     Error(Sx2, "immediate operand must an even number in the range [0, 30]");
4628     return MatchOperand_ParseFail;
4629   } else {
4630     Error(Sx2, "constant expression expected");
4631     return MatchOperand_ParseFail;
4632   }
4633 }
4634 
/// parseBitfield - Parse the "#lsb, #width" descriptor used by the BFC/BFI
/// family of bitfield instructions.
OperandMatchResultTy
ARMAsmParser::parseBitfield(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}
4703 
4704 OperandMatchResultTy
4705 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) {
4706   // Check for a post-index addressing register operand. Specifically:
4707   // postidx_reg := '+' register {, shift}
4708   //              | '-' register {, shift}
4709   //              | register {, shift}
4710 
4711   // This method must return MatchOperand_NoMatch without consuming any tokens
4712   // in the case where there is no match, as other alternatives take other
4713   // parse methods.
4714   MCAsmParser &Parser = getParser();
4715   AsmToken Tok = Parser.getTok();
4716   SMLoc S = Tok.getLoc();
4717   bool haveEaten = false;
4718   bool isAdd = true;
4719   if (Tok.is(AsmToken::Plus)) {
4720     Parser.Lex(); // Eat the '+' token.
4721     haveEaten = true;
4722   } else if (Tok.is(AsmToken::Minus)) {
4723     Parser.Lex(); // Eat the '-' token.
4724     isAdd = false;
4725     haveEaten = true;
4726   }
4727 
4728   SMLoc E = Parser.getTok().getEndLoc();
4729   int Reg = tryParseRegister();
4730   if (Reg == -1) {
4731     if (!haveEaten)
4732       return MatchOperand_NoMatch;
4733     Error(Parser.getTok().getLoc(), "register expected");
4734     return MatchOperand_ParseFail;
4735   }
4736 
4737   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
4738   unsigned ShiftImm = 0;
4739   if (Parser.getTok().is(AsmToken::Comma)) {
4740     Parser.Lex(); // Eat the ','.
4741     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
4742       return MatchOperand_ParseFail;
4743 
4744     // FIXME: Only approximates end...may include intervening whitespace.
4745     E = Parser.getTok().getLoc();
4746   }
4747 
4748   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
4749                                                   ShiftImm, S, E));
4750 
4751   return MatchOperand_Success;
4752 }
4753 
OperandMatchResultTy
ARMAsmParser::parseAM3Offset(OperandVector &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  MCAsmParser &Parser = getParser();
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // Negative zero is encoded as the flag value
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = std::numeric_limits<int32_t>::min();

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }

  // Otherwise expect a register, optionally preceded by '+' or '-'. Once a
  // sign has been consumed, a register is mandatory.
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  // Re-read the current token so the error location / end location below
  // refer to the register token, not the sign.
  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Tok.getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return MatchOperand_Success;
}
4824 
4825 /// Convert parsed operands to MCInst.  Needed here because this instruction
4826 /// only has two register operands, but multiplication is commutative so
4827 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::cvtThumbMultiply(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Operand layout used below: [1] cc_out, [2] condition code, [3] Rd,
  // [4] first source reg, [5] optional second source reg.
  // NOTE(review): inferred from the indices used here — confirm against the
  // tablegen'd asm operand order.
  ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1);
  ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand &)*Operands[4]).getReg() ==
          ((ARMOperand &)*Operands[3]).getReg())
    RegOp = 5;
  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
  // The destination register is tied: repeat operand 0 (Rd) as the last
  // source operand.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}
4843 
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  // Locate the condition-code and immediate operands for this branch form.
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc:  CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      // Widen to the 32-bit encoding if the offset doesn't fit and the
      // target supports it.
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  // Finally emit the target offset and condition-code operands.
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}
4901 
4902 /// Parse an ARM memory expression, return false if successful else return true
4903 /// or an error.  The first token must be a '[' when called.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  // Every memory operand starts with a base register.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Plain "[Rn]" form: no offset, no alignment.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier, e.g. "[r0:128]".
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    // NOTE(review): Tok is a reference to the parser's current token, so
    // after the Lex() above this appears to capture the location of the
    // alignment value itself rather than of the earlier ':'/',' — confirm.
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Convert the alignment given in bits to the byte count carried by the
    // memory operand.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    E = Parser.getTok().getLoc();

    // Remember whether a leading '-' was written so "#-0" can be told apart
    // from "#0" after the expression has been folded to a constant.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().parseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as
    // std::numeric_limits<int32_t>::min(), a sentinel value so the sign of a
    // zero offset is not lost.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                  getContext());

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Otherwise it's a register offset.
  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
5092 
5093 /// parseMemRegOffsetShift - one of these two:
5094 ///   ( lsl | lsr | asr | ror ) , # shift_amount
5095 ///   rrx
5096 /// return true if it parses a shift otherwise it returns false.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  MCAsmParser &Parser = getParser();
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  // No identifier means no shift; let the caller report the error context.
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  // Accept both lower- and upper-case spellings; "asl" is an alias for lsl.
  StringRef ShiftName = Tok.getString();
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone; all other shift types take an amount.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    // Canonicalize <ShiftTy> #0 to lsl #0, which is the encoding for
    // "no shift" regardless of the written shift type.
    if (Imm == 0)
      St = ARM_AM::lsl;
    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
    if (Imm == 32)
      Imm = 0;
    Amount = Imm;
  }

  return false;
}
5156 
5157 /// parseFPImm - A floating point immediate expression operand.
OperandMatchResultTy
ARMAsmParser::parseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the single-precision bit pattern of the value. The
  // various operands that accept floats can check whether the value is
  // valid for them via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  // FP immediates always start with '#' or '$'.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
  bool isVmovf = TyOp.isToken() &&
                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
                  TyOp.getToken() == ".f16");
  // The legacy fconsts/fconstd mnemonics also take an FP immediate, encoded
  // as a raw 8-bit value rather than a floating point literal.
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    // Parse the literal as an IEEE single and carry its bit pattern.
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the 8-bit encoding into the equivalent single-precision bit
    // pattern so downstream code sees a uniform representation.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}
5238 
5239 /// Parse a arm instruction operand.  For now this parses the operand regardless
5240 /// of the mnemonic.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Dispatch on the leading token of the operand.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label.  This
    // is true even if the label is a register name.  So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      // Try a plain register (possibly with writeback '!') first, then a
      // shifted-register operand like "r0, lsl #2".
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
    LLVM_FALLTHROUGH;
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    // The end location is the last character of the expression just lexed.
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash:
    // #42 -> immediate.
    S = Parser.getTok().getLoc();
    Parser.Lex();

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      // Capture the '-' before folding, so #-0 can be distinguished from #0.
      bool isNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        // #-0 is represented by the INT32_MIN sentinel (same convention as
        // parseMemory) so the sign of a zero immediate is preserved.
        if (isNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    LLVM_FALLTHROUGH;

  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    // Wrap the sub-expression in an ARMMCExpr carrying the lower16/upper16
    // relocation variant.
    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}
5375 
5376 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
5377 //  :lower16: and :upper16:.
5378 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
5379   MCAsmParser &Parser = getParser();
5380   RefKind = ARMMCExpr::VK_ARM_None;
5381 
5382   // consume an optional '#' (GNU compatibility)
5383   if (getLexer().is(AsmToken::Hash))
5384     Parser.Lex();
5385 
5386   // :lower16: and :upper16: modifiers
5387   assert(getLexer().is(AsmToken::Colon) && "expected a :");
5388   Parser.Lex(); // Eat ':'
5389 
5390   if (getLexer().isNot(AsmToken::Identifier)) {
5391     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
5392     return true;
5393   }
5394 
5395   enum {
5396     COFF = (1 << MCObjectFileInfo::IsCOFF),
5397     ELF = (1 << MCObjectFileInfo::IsELF),
5398     MACHO = (1 << MCObjectFileInfo::IsMachO),
5399     WASM = (1 << MCObjectFileInfo::IsWasm),
5400   };
5401   static const struct PrefixEntry {
5402     const char *Spelling;
5403     ARMMCExpr::VariantKind VariantKind;
5404     uint8_t SupportedFormats;
5405   } PrefixEntries[] = {
5406     { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
5407     { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
5408   };
5409 
5410   StringRef IDVal = Parser.getTok().getIdentifier();
5411 
5412   const auto &Prefix =
5413       std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
5414                    [&IDVal](const PrefixEntry &PE) {
5415                       return PE.Spelling == IDVal;
5416                    });
5417   if (Prefix == std::end(PrefixEntries)) {
5418     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
5419     return true;
5420   }
5421 
5422   uint8_t CurrentFormat;
5423   switch (getContext().getObjectFileInfo()->getObjectFileType()) {
5424   case MCObjectFileInfo::IsMachO:
5425     CurrentFormat = MACHO;
5426     break;
5427   case MCObjectFileInfo::IsELF:
5428     CurrentFormat = ELF;
5429     break;
5430   case MCObjectFileInfo::IsCOFF:
5431     CurrentFormat = COFF;
5432     break;
5433   case MCObjectFileInfo::IsWasm:
5434     CurrentFormat = WASM;
5435     break;
5436   }
5437 
5438   if (~Prefix->SupportedFormats & CurrentFormat) {
5439     Error(Parser.getTok().getLoc(),
5440           "cannot represent relocation in the current file format");
5441     return true;
5442   }
5443 
5444   RefKind = Prefix->VariantKind;
5445   Parser.Lex();
5446 
5447   if (getLexer().isNot(AsmToken::Colon)) {
5448     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
5449     return true;
5450   }
5451   Parser.Lex(); // Eat the last ':'
5452 
5453   return false;
5454 }
5455 
5456 /// \brief Given a mnemonic, split out possible predication code and carry
5457 /// setting letters to form a canonical mnemonic and flags.
5458 //
5459 // FIXME: Would be nice to autogen this.
5460 // FIXME: This is a bit of a maze of special cases.
5461 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
5462                                       unsigned &PredicationCode,
5463                                       bool &CarrySetting,
5464                                       unsigned &ProcessorIMod,
5465                                       StringRef &ITMask) {
5466   PredicationCode = ARMCC::AL;
5467   CarrySetting = false;
5468   ProcessorIMod = 0;
5469 
5470   // Ignore some mnemonics we know aren't predicated forms.
5471   //
5472   // FIXME: Would be nice to autogen this.
5473   if ((Mnemonic == "movs" && isThumb()) ||
5474       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
5475       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
5476       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
5477       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
5478       Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
5479       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
5480       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
5481       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
5482       Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
5483       Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
5484       Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
5485       Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
5486       Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
5487       Mnemonic == "bxns"  || Mnemonic == "blxns" ||
5488       Mnemonic == "vudot" || Mnemonic == "vsdot" ||
5489       Mnemonic == "vcmla" || Mnemonic == "vcadd")
5490     return Mnemonic;
5491 
5492   // First, split out any predication code. Ignore mnemonics we know aren't
5493   // predicated but do have a carry-set and so weren't caught above.
5494   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
5495       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
5496       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
5497       Mnemonic != "sbcs" && Mnemonic != "rscs") {
5498     unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
5499     if (CC != ~0U) {
5500       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
5501       PredicationCode = CC;
5502     }
5503   }
5504 
5505   // Next, determine if we have a carry setting bit. We explicitly ignore all
5506   // the instructions we know end in 's'.
5507   if (Mnemonic.endswith("s") &&
5508       !(Mnemonic == "cps" || Mnemonic == "mls" ||
5509         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
5510         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
5511         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
5512         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
5513         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
5514         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
5515         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
5516         Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
5517         Mnemonic == "bxns" || Mnemonic == "blxns" ||
5518         (Mnemonic == "movs" && isThumb()))) {
5519     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
5520     CarrySetting = true;
5521   }
5522 
5523   // The "cps" instruction can have a interrupt mode operand which is glued into
5524   // the mnemonic. Check if this is the case, split it and parse the imod op
5525   if (Mnemonic.startswith("cps")) {
5526     // Split out any imod code.
5527     unsigned IMod =
5528       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
5529       .Case("ie", ARM_PROC::IE)
5530       .Case("id", ARM_PROC::ID)
5531       .Default(~0U);
5532     if (IMod != ~0U) {
5533       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
5534       ProcessorIMod = IMod;
5535     }
5536   }
5537 
5538   // The "it" instruction has the condition mask on the end of the mnemonic.
5539   if (Mnemonic.startswith("it")) {
5540     ITMask = Mnemonic.slice(2, Mnemonic.size());
5541     Mnemonic = Mnemonic.slice(0, 2);
5542   }
5543 
5544   return Mnemonic;
5545 }
5546 
5547 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
5548 /// inclusion of carry set or predication code operands.
5549 //
5550 // FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode) {
  // Mnemonics that have a carry-setting ('s' suffix) variant. A few of them
  // only do so in ARM mode.
  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
      Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
      (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd") {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode; in ARM mode the
    // ones listed here never take a predication code.
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
        Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
        Mnemonic != "ldc2" && Mnemonic != "ldc2l" && Mnemonic != "stc2" &&
        Mnemonic != "stc2l" && !Mnemonic.startswith("rfe") &&
        !Mnemonic.startswith("srs");
  } else if (isThumbOne()) {
    // Thumb1: "movs" is never predicable; pre-v6M, "nop" isn't either.
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}
5600 
5601 // \brief Some Thumb instructions have two operand forms that are not
5602 // available as three operand, convert to two operand form if possible.
5603 //
5604 // FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                                 bool CarrySetting,
                                                 OperandVector &Operands) {
  // Only consider the fully-parsed three-operand shape:
  // mnemonic, cc_out, cond-code, Rd, Rn, Rm-or-imm.
  if (Operands.size() != 6)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
        auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    // Transform if any operand is PC, ...
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      // ... or any operand is SP, except 'add sp, sp, #imm' with an
      // immediate outside the range of the two-operand encoding.
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these mnemonics have a two-operand (Rdn, Rm / Rdn, #imm) form.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5.  The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(Op4, Op5);
    // Drop the now-redundant Rd (Operands[3]), leaving the two-operand form.
    Operands.erase(Operands.begin() + 3);
  }
}
5686 
/// Return true when the defaulted cc_out operand (Operands[1]) should be
/// removed before matching, because the only instruction variant that can
/// match the remaining operands has no cc_out optional-def operand.
/// Note: getReg() == 0 on Operands[1] means the cc_out was defaulted by the
/// parser, i.e. the user did not write an explicit 's' suffix.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                                          OperandVector &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
      return false;
    // Check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
        static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
       (Operands.size() == 6 &&
        static_cast<ARMOperand &>(*Operands[5]).isImm())))
    return true;

  // No special case applies: keep the cc_out operand.
  return false;
}
5807 
5808 bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
5809                                               OperandVector &Operands) {
5810   // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
5811   unsigned RegIdx = 3;
5812   if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
5813       (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
5814        static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
5815     if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
5816         (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
5817          static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
5818       RegIdx = 4;
5819 
5820     if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
5821         (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
5822              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
5823          ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
5824              static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
5825       return true;
5826   }
5827   return false;
5828 }
5829 
5830 static bool isDataTypeToken(StringRef Tok) {
5831   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
5832     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
5833     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
5834     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
5835     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
5836     Tok == ".f" || Tok == ".d";
5837 }
5838 
5839 // FIXME: This bit should probably be handled via an explicit match class
5840 // in the .td files that matches the suffix instead of having it be
5841 // a literal string token the way it is now.
5842 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
5843   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
5844 }
5845 
5846 static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
5847                                  unsigned VariantID);
5848 
5849 static bool RequiresVFPRegListValidation(StringRef Inst,
5850                                          bool &AcceptSinglePrecisionOnly,
5851                                          bool &AcceptDoublePrecisionOnly) {
5852   if (Inst.size() < 7)
5853     return false;
5854 
5855   if (Inst.startswith("fldm") || Inst.startswith("fstm")) {
5856     StringRef AddressingMode = Inst.substr(4, 2);
5857     if (AddressingMode == "ia" || AddressingMode == "db" ||
5858         AddressingMode == "ea" || AddressingMode == "fd") {
5859       AcceptSinglePrecisionOnly = Inst[6] == 's';
5860       AcceptDoublePrecisionOnly = Inst[6] == 'd' || Inst[6] == 'x';
5861       return true;
5862     }
5863   }
5864 
5865   return false;
5866 }
5867 
5868 // The GNU assembler has aliases of ldrd and strd with the second register
5869 // omitted. We don't have a way to do that in tablegen, so fix it up here.
5870 //
// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
// it. If we do find anything that prevents us from doing the transformation we
5874 // bail out, and let the assembly parser report an error on the instruction as
5875 // it is written.
5876 void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
5877                                      OperandVector &Operands) {
5878   if (Mnemonic != "ldrd" && Mnemonic != "strd")
5879     return;
5880   if (Operands.size() < 4)
5881     return;
5882 
5883   ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
5884   ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);
5885 
5886   if (!Op2.isReg())
5887     return;
5888   if (!Op3.isMem())
5889     return;
5890 
5891   const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
5892   if (!GPR.contains(Op2.getReg()))
5893     return;
5894 
5895   unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
5896   if (!isThumb() && (RtEncoding & 1)) {
5897     // In ARM mode, the registers must be from an aligned pair, this
5898     // restriction does not apply in Thumb mode.
5899     return;
5900   }
5901   if (Op2.getReg() == ARM::PC)
5902     return;
5903   unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
5904   if (!PairedReg || PairedReg == ARM::PC ||
5905       (PairedReg == ARM::SP && !hasV8Ops()))
5906     return;
5907 
5908   Operands.insert(
5909       Operands.begin() + 3,
5910       ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
5911   return;
5912 }
5913 
/// Parse an ARM instruction mnemonic followed by its operands.
/// Pushes the mnemonic token, any implicit cc_out / condition-code operands,
/// remaining mnemonic suffix tokens, and the parsed operands onto
/// \p Operands, then applies the various operand-list fixups below.
/// Returns true on error (a diagnostic has already been emitted).
bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                    SMLoc NameLoc, OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // FIXME: Can this be done via tablegen in some fashion?
  bool RequireVFPRegisterListCheck;
  bool AcceptSinglePrecisionOnly;
  bool AcceptDoublePrecisionOnly;
  RequireVFPRegisterListCheck =
    RequiresVFPRegListValidation(Name, AcceptSinglePrecisionOnly,
                                 AcceptDoublePrecisionOnly);

  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  // Note: applyMnemonicAliases takes Name by reference and may rewrite it.
  uint64_t AvailableFeatures = getAvailableFeatures();
  unsigned AssemblerDialect = getParser().getAssemblerDialect();
  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Point diagnostics at the mask suffix, just past the two "it" chars.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask string right-to-left, shifting in a 't'/'e' bit each step.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code sits after the mnemonic and the optional 's' suffix
    // (CarrySetting is 0 or 1, so it doubles as the suffix length).
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  } else if (Mnemonic == "cps" && isMClass()) {
    return Error(NameLoc, "instruction 'cps' requires effect for M-class");
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // For ARM mode generate an error if the .n qualifier is used.
    if (ExtraToken == ".n" && !isThumb()) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
                   "arm mode");
    }

    // The .n qualifier is always discarded as that is what the tables
    // and matcher expect.  In ARM mode the .w qualifier has no effect,
    // so discard it to avoid errors that can be caused by the matcher.
    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      return true;
    }

    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        return true;
      }
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  // FLDM/FSTM: enforce an all-single or all-double register list, as
  // determined by RequiresVFPRegListValidation() above.
  if (RequireVFPRegisterListCheck) {
    ARMOperand &Op = static_cast<ARMOperand &>(*Operands.back());
    if (AcceptSinglePrecisionOnly && !Op.isSPRRegList())
      return Error(Op.getStartLoc(),
                   "VFP/Neon single precision register expected");
    if (AcceptDoublePrecisionOnly && !Op.isDPRRegList())
      return Error(Op.getStartLoc(),
                   "VFP/Neon double precision register expected");
  }

  tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // Some instructions have the same mnemonic, but don't always
  // have a predicate. Distinguish them here and delete the
  // predicate if needed.
  if (shouldOmitPredicateOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand &>(*Operands[2]).isImm())
    Operands.erase(Operands.begin() + 1);

  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GRPs cannot be automatically
  // expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  if (!isThumb() && Operands.size() > 4 &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
    unsigned Idx = isLoad ? 2 : 3;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
    // Adjust only if Op1 and Op2 are GPRs.
    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
        MRC.contains(Op2.getReg())) {
      unsigned Reg1 = Op1.getReg();
      unsigned Reg2 = Op2.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg1);
      unsigned Rt2 = MRI->getEncodingValue(Reg2);

      // Rt2 must be Rt + 1 and Rt must be even.
      if (Rt + 1 != Rt2 || (Rt & 1)) {
        return Error(Op2.getStartLoc(),
                     isLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");
      }
      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
      // Replace the two GPR operands with the single merged GPRPair operand.
      Operands[Idx] =
          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
      Operands.erase(Operands.begin() + Idx + 1);
    }
  }

  // GNU Assembler extension (compatibility).
  fixupGNULDRDAlias(Mnemonic, Operands);

  // FIXME: As said above, this is all a pretty gross hack.  This instruction
  // does not fit with other "subs" and tblgen.
  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
  // so the Mnemonic is the original name "subs" and delete the predicate
  // operand so it will match the table entry.
  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
    Operands.erase(Operands.begin() + 1);
  }
  return false;
}
6175 
6176 // Validate context-sensitive operand constraints.
6177 
6178 // return 'true' if register list contains non-low GPR registers,
6179 // 'false' otherwise. If Reg is in the register list or is HiReg, set
6180 // 'containsReg' to true.
6181 static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
6182                                  unsigned Reg, unsigned HiReg,
6183                                  bool &containsReg) {
6184   containsReg = false;
6185   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
6186     unsigned OpReg = Inst.getOperand(i).getReg();
6187     if (OpReg == Reg)
6188       containsReg = true;
6189     // Anything other than a low register isn't legal here.
6190     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
6191       return true;
6192   }
6193   return false;
6194 }
6195 
// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
6198 static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
6199   for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
6200     unsigned OpReg = Inst.getOperand(i).getReg();
6201     if (OpReg == Reg)
6202       return true;
6203   }
6204   return false;
6205 }
6206 
6207 // Return true if instruction has the interesting property of being
6208 // allowed in IT blocks, but not being predicable.
6209 static bool instIsBreakpoint(const MCInst &Inst) {
6210     return Inst.getOpcode() == ARM::tBKPT ||
6211            Inst.getOpcode() == ARM::BKPT ||
6212            Inst.getOpcode() == ARM::tHLT ||
6213            Inst.getOpcode() == ARM::HLT;
6214 }
6215 
6216 bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
6217                                        const OperandVector &Operands,
6218                                        unsigned ListNo, bool IsARPop) {
6219   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6220   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6221 
6222   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6223   bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
6224   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6225 
6226   if (!IsARPop && ListContainsSP)
6227     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6228                  "SP may not be in the register list");
6229   else if (ListContainsPC && ListContainsLR)
6230     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6231                  "PC and LR may not be in the register list simultaneously");
6232   return false;
6233 }
6234 
6235 bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
6236                                        const OperandVector &Operands,
6237                                        unsigned ListNo) {
6238   const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
6239   bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";
6240 
6241   bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
6242   bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);
6243 
6244   if (ListContainsSP && ListContainsPC)
6245     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6246                  "SP and PC may not be in the register list");
6247   else if (ListContainsSP)
6248     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6249                  "SP may not be in the register list");
6250   else if (ListContainsPC)
6251     return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
6252                  "PC may not be in the register list");
6253   return false;
6254 }
6255 
6256 // FIXME: We would really like to be able to tablegen'erate this.
// Semantic validation of a fully-parsed instruction before encoding.
// Checks IT-block legality first, then applies per-opcode operand
// constraints (register pairing, list contents, branch ranges, ...).
// Returns true (after emitting a diagnostic) on failure, false on success.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
                                       const OperandVector &Operands) {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  // Default diagnostic location: the mnemonic itself.
  SMLoc Loc = Operands[0]->getStartLoc();

  // Check the IT block state first.
  // NOTE: BKPT and HLT instructions have the interesting property of being
  // allowed in IT blocks, but not being predicable. They just always execute.
  if (inITBlock() && !instIsBreakpoint(Inst)) {
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    // The encoded condition must match what the enclosing IT specifies for
    // this slot of the block.
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    if (Cond != currentITCond()) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned I = 1; I < Operands.size(); ++I)
        if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
          CondLoc = Operands[I]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(currentITCond())) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tBcc &&
             Inst.getOpcode() != ARM::t2Bcc) {
    return Error(Loc, "predicated instructions must be in IT block");
  } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
                 ARMCC::AL) {
    // ARM mode: merely a warning, since ARM-mode predication is legal.
    return Warning(Loc, "predicated instructions should be in IT block");
  }

  // PC-setting instructions in an IT block, but not the last instruction of
  // the block, are UNPREDICTABLE.
  if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
    return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
  }

  // Per-opcode operand constraints.
  const unsigned Opcode = Inst.getOpcode();
  switch (Opcode) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST: {
    const unsigned RtReg = Inst.getOperand(0).getReg();

    // Rt can't be R14.
    if (RtReg == ARM::LR)
      return Error(Operands[3]->getStartLoc(),
                   "Rt can't be R14");

    const unsigned Rt = MRI->getEncodingValue(RtReg);
    // Rt must be even-numbered.
    if ((Rt & 1) == 1)
      return Error(Operands[3]->getStartLoc(),
                   "Rt must be even-numbered");

    // Rt2 must be Rt + 1.
    const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");

    if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) {
      const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());
      // For addressing modes with writeback, the base register needs to be
      // different from the destination registers.
      if (Rn == Rt || Rn == Rt2)
        return Error(Operands[3]->getStartLoc(),
                     "base register needs to be different from destination "
                     "registers");
    }

    return false;
  }
  case ARM::t2LDRDi8:
  case ARM::t2LDRD_PRE:
  case ARM::t2LDRD_POST: {
    // Rt2 must be different from Rt.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 == Rt)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands can't be identical");
    return false;
  }
  case ARM::t2BXJ: {
    const unsigned RmReg = Inst.getOperand(0).getReg();
    // Rm = SP is no longer unpredictable in v8-A
    if (RmReg == ARM::SP && !hasV8Ops())
      return Error(Operands[2]->getStartLoc(),
                   "r13 (SP) is an unpredictable operand to BXJ");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST: {
    // Rt2 must be Rt + 1. (Writeback forms: operand 0 is the updated base,
    // so the source pair starts at operand 1.)
    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STR_PRE_IMM:
  case ARM::STR_PRE_REG:
  case ARM::STR_POST_IMM:
  case ARM::STR_POST_REG:
  case ARM::STRH_PRE:
  case ARM::STRH_POST:
  case ARM::STRB_PRE_IMM:
  case ARM::STRB_PRE_REG:
  case ARM::STRB_POST_IMM:
  case ARM::STRB_POST_REG: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "source register and base register can't be identical");
    return false;
  }
  case ARM::LDR_PRE_IMM:
  case ARM::LDR_PRE_REG:
  case ARM::LDR_POST_IMM:
  case ARM::LDR_POST_REG:
  case ARM::LDRH_PRE:
  case ARM::LDRH_POST:
  case ARM::LDRSH_PRE:
  case ARM::LDRSH_POST:
  case ARM::LDRB_PRE_IMM:
  case ARM::LDRB_PRE_REG:
  case ARM::LDRB_POST_IMM:
  case ARM::LDRB_POST_REG:
  case ARM::LDRSB_PRE:
  case ARM::LDRSB_POST: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "destination register and base register can't be identical");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // Width must be in range [1, 32-lsb].
    unsigned LSB = Inst.getOperand(2).getImm();
    unsigned Widthm1 = Inst.getOperand(3).getImm();
    if (Widthm1 >= 32 - LSB)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  // Notionally handles ARM::tLDMIA_UPD too.
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM instruction.
    // We'll make the transformation in processInstruction() if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool HasWritebackToken =
        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (ListContainsBase && HasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  }
  case ARM::LDMIA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::LDMDA_UPD:
    // ARM variants loading and updating the same register are only officially
    // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
    if (!hasV7Ops())
      break;
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    if (validatetSTMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    // Writeback register may never appear in the list; beyond that, apply
    // the load- or store-specific list checks.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");

    if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      if (validatetLDMRegList(Inst, Operands, 3))
        return true;
    } else {
      if (validatetSTMRegList(Inst, Operands, 3))
        return true;
    }
    break;

  case ARM::sysLDMIA_UPD:
  case ARM::sysLDMDA_UPD:
  case ARM::sysLDMDB_UPD:
  case ARM::sysLDMIB_UPD:
    if (!listContainsReg(Inst, 3, ARM::PC))
      return Error(Operands[4]->getStartLoc(),
                   "writeback register only allowed on system LDM "
                   "if PC in register-list");
    break;
  case ARM::sysSTMIA_UPD:
  case ARM::sysSTMDA_UPD:
  case ARM::sysSTMDB_UPD:
  case ARM::sysSTMIB_UPD:
    return Error(Operands[2]->getStartLoc(),
                 "system STM cannot have writeback register");
  case ARM::tMUL:
    // The second source operand must be the same register as the destination
    // operand.
    //
    // In this case, we must directly check the parsed operands because the
    // cvtThumbMultiply() function is written in such a way that it guarantees
    // this first statement is always true for the new Inst.  Essentially, the
    // destination is unconditionally copied into the second source operand
    // without checking to see if it matches what we actually parsed.
    if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
                                 ((ARMOperand &)*Operands[5]).getReg()) &&
        (((ARMOperand &)*Operands[3]).getReg() !=
         ((ARMOperand &)*Operands[4]).getReg())) {
      return Error(Operands[3]->getStartLoc(),
                   "destination register must match source register");
    }
    break;

  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
      return true;
    break;
  }
  case ARM::tPUSH: {
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    if (validatetSTMRegList(Inst, Operands, 2))
      return true;
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool ListContainsBase, InvalidLowList;
    InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
                                          0, ListContainsBase);
    if (InvalidLowList && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");

    // This would be converted to a 32-bit stm, but that's not valid if the
    // writeback register is in the list.
    if (InvalidLowList && ListContainsBase)
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetSTMRegList(Inst, Operands, 4))
      return true;
    break;
  }
  case ARM::tADDrSP:
    // If the non-SP source operand and the destination operand are not the
    // same, we need thumb2 (for the wide encoding), or we have an error.
    if (!isThumbTwo() &&
        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
      return Error(Operands[4]->getStartLoc(),
                   "source register must be the same as destination");
    }
    break;

  // Final range checking for Thumb unconditional branch instructions.
  case ARM::tB:
    if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2B: {
    // The target may be operand 2 or 3 depending on whether a condition-code
    // operand was parsed before it.
    int op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
      return Error(Operands[op]->getStartLoc(), "branch target out of range");
    break;
  }
  // Final range checking for Thumb conditional branch instructions.
  case ARM::tBcc:
    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2Bcc: {
    int Op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::tCBZ:
  case ARM::tCBNZ: {
    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::MOVi16:
  case ARM::MOVTi16:
  case ARM::t2MOVi16:
  case ARM::t2MOVTi16:
    {
    // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
    // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16 as part of the expression.  We don't
    // want the behavior of silently truncating, which can be unexpected and
    // lead to bugs that are difficult to find since this is an easy mistake
    // to make.
    int i = (Operands[3]->isImm()) ? 3 : 4;
    ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
    // Plain constants are always fine.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (CE) break;
    // NOTE(review): dyn_cast<MCExpr> of an MCExpr* looks redundant — this
    // appears to only re-check for null; confirm before simplifying.
    const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
    if (!E) break;
    const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
    if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                       ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
      return Error(
          Op.getStartLoc(),
          "immediate expression for mov requires :lower16: or :upper16");
    break;
  }
  case ARM::HINT:
  case ARM::t2HINT:
    if (hasRAS()) {
      // ESB is not predicable (pred must be AL)
      unsigned Imm8 = Inst.getOperand(0).getImm();
      unsigned Pred = Inst.getOperand(1).getImm();
      if (Imm8 == 0x10 && Pred != ARMCC::AL)
        return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
                                                 "predicable, but condition "
                                                 "code specified");
    }
    // Without the RAS extension, this behaves as any other unallocated hint.
    break;
  }

  return false;
}
6650 
// Maps a VSTn "Asm" pseudo-opcode (produced by the matcher) to the real
// MCInst opcode, and reports the register-list spacing through \p Spacing
// (1 for d-register lists, 2 for even/odd-spaced q-register lists).
// Unrecognized opcodes are a programming error (llvm_unreachable).
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;

  // VST3LN
  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with the register-writeback and
  // non-writeback q16 variants below (both use 2) — confirm intended.
  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;

  // VST3
  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;

  // VST4LN
  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  // NOTE(review): Spacing = 1 here disagrees with the register-writeback and
  // non-writeback q16 variants below (both use 2) — confirm intended.
  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;

  // VST4
  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
  }
}
6759 
6760 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
6761   switch(Opc) {
6762   default: llvm_unreachable("unexpected opcode!");
6763   // VLD1LN
6764   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6765   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6766   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6767   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
6768   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
6769   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
6770   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
6771   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
6772   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
6773 
6774   // VLD2LN
6775   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6776   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6777   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6778   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
6779   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6780   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
6781   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
6782   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
6783   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
6784   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
6785   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
6786   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
6787   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
6788   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
6789   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
6790 
6791   // VLD3DUP
6792   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6793   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6794   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6795   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
6796   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6797   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6798   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
6799   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
6800   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
6801   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
6802   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
6803   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
6804   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
6805   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
6806   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
6807   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
6808   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
6809   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
6810 
6811   // VLD3LN
6812   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6813   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6814   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6815   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
6816   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6817   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
6818   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
6819   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
6820   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
6821   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
6822   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
6823   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
6824   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
6825   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
6826   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
6827 
6828   // VLD3
6829   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6830   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6831   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6832   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6833   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6834   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6835   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
6836   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
6837   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
6838   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
6839   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
6840   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
6841   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
6842   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
6843   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
6844   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
6845   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
6846   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
6847 
6848   // VLD4LN
6849   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6850   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6851   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6852   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6853   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6854   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
6855   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
6856   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
6857   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
6858   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
6859   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
6860   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
6861   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
6862   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
6863   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
6864 
6865   // VLD4DUP
6866   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6867   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6868   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6869   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD;
6870   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
6871   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6872   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
6873   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
6874   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
6875   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
6876   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
6877   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
6878   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
6879   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
6880   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
6881   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
6882   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
6883   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
6884 
6885   // VLD4
6886   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6887   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6888   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6889   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6890   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6891   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6892   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
6893   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
6894   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
6895   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
6896   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
6897   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
6898   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
6899   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
6900   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
6901   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
6902   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
6903   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
6904   }
6905 }
6906 
6907 bool ARMAsmParser::processInstruction(MCInst &Inst,
6908                                       const OperandVector &Operands,
6909                                       MCStreamer &Out) {
6910   // Check if we have the wide qualifier, because if it's present we
6911   // must avoid selecting a 16-bit thumb instruction.
6912   bool HasWideQualifier = false;
6913   for (auto &Op : Operands) {
6914     ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op);
6915     if (ARMOp.isToken() && ARMOp.getToken() == ".w") {
6916       HasWideQualifier = true;
6917       break;
6918     }
6919   }
6920 
6921   switch (Inst.getOpcode()) {
6922   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
6923   case ARM::LDRT_POST:
6924   case ARM::LDRBT_POST: {
6925     const unsigned Opcode =
6926       (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM
6927                                            : ARM::LDRBT_POST_IMM;
6928     MCInst TmpInst;
6929     TmpInst.setOpcode(Opcode);
6930     TmpInst.addOperand(Inst.getOperand(0));
6931     TmpInst.addOperand(Inst.getOperand(1));
6932     TmpInst.addOperand(Inst.getOperand(1));
6933     TmpInst.addOperand(MCOperand::createReg(0));
6934     TmpInst.addOperand(MCOperand::createImm(0));
6935     TmpInst.addOperand(Inst.getOperand(2));
6936     TmpInst.addOperand(Inst.getOperand(3));
6937     Inst = TmpInst;
6938     return true;
6939   }
6940   // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction.
6941   case ARM::STRT_POST:
6942   case ARM::STRBT_POST: {
6943     const unsigned Opcode =
6944       (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM
6945                                            : ARM::STRBT_POST_IMM;
6946     MCInst TmpInst;
6947     TmpInst.setOpcode(Opcode);
6948     TmpInst.addOperand(Inst.getOperand(1));
6949     TmpInst.addOperand(Inst.getOperand(0));
6950     TmpInst.addOperand(Inst.getOperand(1));
6951     TmpInst.addOperand(MCOperand::createReg(0));
6952     TmpInst.addOperand(MCOperand::createImm(0));
6953     TmpInst.addOperand(Inst.getOperand(2));
6954     TmpInst.addOperand(Inst.getOperand(3));
6955     Inst = TmpInst;
6956     return true;
6957   }
6958   // Alias for alternate form of 'ADR Rd, #imm' instruction.
6959   case ARM::ADDri: {
6960     if (Inst.getOperand(1).getReg() != ARM::PC ||
6961         Inst.getOperand(5).getReg() != 0 ||
6962         !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm()))
6963       return false;
6964     MCInst TmpInst;
6965     TmpInst.setOpcode(ARM::ADR);
6966     TmpInst.addOperand(Inst.getOperand(0));
6967     if (Inst.getOperand(2).isImm()) {
6968       // Immediate (mod_imm) will be in its encoded form, we must unencode it
6969       // before passing it to the ADR instruction.
6970       unsigned Enc = Inst.getOperand(2).getImm();
6971       TmpInst.addOperand(MCOperand::createImm(
6972         ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7)));
6973     } else {
6974       // Turn PC-relative expression into absolute expression.
6975       // Reading PC provides the start of the current instruction + 8 and
6976       // the transform to adr is biased by that.
6977       MCSymbol *Dot = getContext().createTempSymbol();
6978       Out.EmitLabel(Dot);
6979       const MCExpr *OpExpr = Inst.getOperand(2).getExpr();
6980       const MCExpr *InstPC = MCSymbolRefExpr::create(Dot,
6981                                                      MCSymbolRefExpr::VK_None,
6982                                                      getContext());
6983       const MCExpr *Const8 = MCConstantExpr::create(8, getContext());
6984       const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8,
6985                                                      getContext());
6986       const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr,
6987                                                         getContext());
6988       TmpInst.addOperand(MCOperand::createExpr(FixupAddr));
6989     }
6990     TmpInst.addOperand(Inst.getOperand(3));
6991     TmpInst.addOperand(Inst.getOperand(4));
6992     Inst = TmpInst;
6993     return true;
6994   }
6995   // Aliases for alternate PC+imm syntax of LDR instructions.
6996   case ARM::t2LDRpcrel:
6997     // Select the narrow version if the immediate will fit.
6998     if (Inst.getOperand(1).getImm() > 0 &&
6999         Inst.getOperand(1).getImm() <= 0xff &&
7000         !HasWideQualifier)
7001       Inst.setOpcode(ARM::tLDRpci);
7002     else
7003       Inst.setOpcode(ARM::t2LDRpci);
7004     return true;
7005   case ARM::t2LDRBpcrel:
7006     Inst.setOpcode(ARM::t2LDRBpci);
7007     return true;
7008   case ARM::t2LDRHpcrel:
7009     Inst.setOpcode(ARM::t2LDRHpci);
7010     return true;
7011   case ARM::t2LDRSBpcrel:
7012     Inst.setOpcode(ARM::t2LDRSBpci);
7013     return true;
7014   case ARM::t2LDRSHpcrel:
7015     Inst.setOpcode(ARM::t2LDRSHpci);
7016     return true;
7017   case ARM::LDRConstPool:
7018   case ARM::tLDRConstPool:
7019   case ARM::t2LDRConstPool: {
7020     // Pseudo instruction ldr rt, =immediate is converted to a
7021     // MOV rt, immediate if immediate is known and representable
7022     // otherwise we create a constant pool entry that we load from.
7023     MCInst TmpInst;
7024     if (Inst.getOpcode() == ARM::LDRConstPool)
7025       TmpInst.setOpcode(ARM::LDRi12);
7026     else if (Inst.getOpcode() == ARM::tLDRConstPool)
7027       TmpInst.setOpcode(ARM::tLDRpci);
7028     else if (Inst.getOpcode() == ARM::t2LDRConstPool)
7029       TmpInst.setOpcode(ARM::t2LDRpci);
7030     const ARMOperand &PoolOperand =
7031       (HasWideQualifier ?
7032        static_cast<ARMOperand &>(*Operands[4]) :
7033        static_cast<ARMOperand &>(*Operands[3]));
7034     const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm();
7035     // If SubExprVal is a constant we may be able to use a MOV
7036     if (isa<MCConstantExpr>(SubExprVal) &&
7037         Inst.getOperand(0).getReg() != ARM::PC &&
7038         Inst.getOperand(0).getReg() != ARM::SP) {
7039       int64_t Value =
7040         (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue();
7041       bool UseMov  = true;
7042       bool MovHasS = true;
7043       if (Inst.getOpcode() == ARM::LDRConstPool) {
7044         // ARM Constant
7045         if (ARM_AM::getSOImmVal(Value) != -1) {
7046           Value = ARM_AM::getSOImmVal(Value);
7047           TmpInst.setOpcode(ARM::MOVi);
7048         }
7049         else if (ARM_AM::getSOImmVal(~Value) != -1) {
7050           Value = ARM_AM::getSOImmVal(~Value);
7051           TmpInst.setOpcode(ARM::MVNi);
7052         }
7053         else if (hasV6T2Ops() &&
7054                  Value >=0 && Value < 65536) {
7055           TmpInst.setOpcode(ARM::MOVi16);
7056           MovHasS = false;
7057         }
7058         else
7059           UseMov = false;
7060       }
7061       else {
7062         // Thumb/Thumb2 Constant
7063         if (hasThumb2() &&
7064             ARM_AM::getT2SOImmVal(Value) != -1)
7065           TmpInst.setOpcode(ARM::t2MOVi);
7066         else if (hasThumb2() &&
7067                  ARM_AM::getT2SOImmVal(~Value) != -1) {
7068           TmpInst.setOpcode(ARM::t2MVNi);
7069           Value = ~Value;
7070         }
7071         else if (hasV8MBaseline() &&
7072                  Value >=0 && Value < 65536) {
7073           TmpInst.setOpcode(ARM::t2MOVi16);
7074           MovHasS = false;
7075         }
7076         else
7077           UseMov = false;
7078       }
7079       if (UseMov) {
7080         TmpInst.addOperand(Inst.getOperand(0));           // Rt
7081         TmpInst.addOperand(MCOperand::createImm(Value));  // Immediate
7082         TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7083         TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7084         if (MovHasS)
7085           TmpInst.addOperand(MCOperand::createReg(0));    // S
7086         Inst = TmpInst;
7087         return true;
7088       }
7089     }
7090     // No opportunity to use MOV/MVN create constant pool
7091     const MCExpr *CPLoc =
7092       getTargetStreamer().addConstantPoolEntry(SubExprVal,
7093                                                PoolOperand.getStartLoc());
7094     TmpInst.addOperand(Inst.getOperand(0));           // Rt
7095     TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool
7096     if (TmpInst.getOpcode() == ARM::LDRi12)
7097       TmpInst.addOperand(MCOperand::createImm(0));    // unused offset
7098     TmpInst.addOperand(Inst.getOperand(2));           // CondCode
7099     TmpInst.addOperand(Inst.getOperand(3));           // CondCode
7100     Inst = TmpInst;
7101     return true;
7102   }
7103   // Handle NEON VST complex aliases.
7104   case ARM::VST1LNdWB_register_Asm_8:
7105   case ARM::VST1LNdWB_register_Asm_16:
7106   case ARM::VST1LNdWB_register_Asm_32: {
7107     MCInst TmpInst;
7108     // Shuffle the operands around so the lane index operand is in the
7109     // right place.
7110     unsigned Spacing;
7111     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7112     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7113     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7114     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7115     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7116     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7117     TmpInst.addOperand(Inst.getOperand(1)); // lane
7118     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7119     TmpInst.addOperand(Inst.getOperand(6));
7120     Inst = TmpInst;
7121     return true;
7122   }
7123 
7124   case ARM::VST2LNdWB_register_Asm_8:
7125   case ARM::VST2LNdWB_register_Asm_16:
7126   case ARM::VST2LNdWB_register_Asm_32:
7127   case ARM::VST2LNqWB_register_Asm_16:
7128   case ARM::VST2LNqWB_register_Asm_32: {
7129     MCInst TmpInst;
7130     // Shuffle the operands around so the lane index operand is in the
7131     // right place.
7132     unsigned Spacing;
7133     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7134     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7135     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7136     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7137     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7138     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7139     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7140                                             Spacing));
7141     TmpInst.addOperand(Inst.getOperand(1)); // lane
7142     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7143     TmpInst.addOperand(Inst.getOperand(6));
7144     Inst = TmpInst;
7145     return true;
7146   }
7147 
7148   case ARM::VST3LNdWB_register_Asm_8:
7149   case ARM::VST3LNdWB_register_Asm_16:
7150   case ARM::VST3LNdWB_register_Asm_32:
7151   case ARM::VST3LNqWB_register_Asm_16:
7152   case ARM::VST3LNqWB_register_Asm_32: {
7153     MCInst TmpInst;
7154     // Shuffle the operands around so the lane index operand is in the
7155     // right place.
7156     unsigned Spacing;
7157     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7158     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7159     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7160     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7161     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7162     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7163     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7164                                             Spacing));
7165     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7166                                             Spacing * 2));
7167     TmpInst.addOperand(Inst.getOperand(1)); // lane
7168     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7169     TmpInst.addOperand(Inst.getOperand(6));
7170     Inst = TmpInst;
7171     return true;
7172   }
7173 
7174   case ARM::VST4LNdWB_register_Asm_8:
7175   case ARM::VST4LNdWB_register_Asm_16:
7176   case ARM::VST4LNdWB_register_Asm_32:
7177   case ARM::VST4LNqWB_register_Asm_16:
7178   case ARM::VST4LNqWB_register_Asm_32: {
7179     MCInst TmpInst;
7180     // Shuffle the operands around so the lane index operand is in the
7181     // right place.
7182     unsigned Spacing;
7183     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7184     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7185     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7186     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7187     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7188     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7189     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7190                                             Spacing));
7191     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7192                                             Spacing * 2));
7193     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7194                                             Spacing * 3));
7195     TmpInst.addOperand(Inst.getOperand(1)); // lane
7196     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7197     TmpInst.addOperand(Inst.getOperand(6));
7198     Inst = TmpInst;
7199     return true;
7200   }
7201 
7202   case ARM::VST1LNdWB_fixed_Asm_8:
7203   case ARM::VST1LNdWB_fixed_Asm_16:
7204   case ARM::VST1LNdWB_fixed_Asm_32: {
7205     MCInst TmpInst;
7206     // Shuffle the operands around so the lane index operand is in the
7207     // right place.
7208     unsigned Spacing;
7209     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7210     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7211     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7212     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7213     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7214     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7215     TmpInst.addOperand(Inst.getOperand(1)); // lane
7216     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7217     TmpInst.addOperand(Inst.getOperand(5));
7218     Inst = TmpInst;
7219     return true;
7220   }
7221 
7222   case ARM::VST2LNdWB_fixed_Asm_8:
7223   case ARM::VST2LNdWB_fixed_Asm_16:
7224   case ARM::VST2LNdWB_fixed_Asm_32:
7225   case ARM::VST2LNqWB_fixed_Asm_16:
7226   case ARM::VST2LNqWB_fixed_Asm_32: {
7227     MCInst TmpInst;
7228     // Shuffle the operands around so the lane index operand is in the
7229     // right place.
7230     unsigned Spacing;
7231     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7232     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7233     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7234     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7235     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7236     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7237     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7238                                             Spacing));
7239     TmpInst.addOperand(Inst.getOperand(1)); // lane
7240     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7241     TmpInst.addOperand(Inst.getOperand(5));
7242     Inst = TmpInst;
7243     return true;
7244   }
7245 
7246   case ARM::VST3LNdWB_fixed_Asm_8:
7247   case ARM::VST3LNdWB_fixed_Asm_16:
7248   case ARM::VST3LNdWB_fixed_Asm_32:
7249   case ARM::VST3LNqWB_fixed_Asm_16:
7250   case ARM::VST3LNqWB_fixed_Asm_32: {
7251     MCInst TmpInst;
7252     // Shuffle the operands around so the lane index operand is in the
7253     // right place.
7254     unsigned Spacing;
7255     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7256     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7257     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7258     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7259     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7260     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7261     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7262                                             Spacing));
7263     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7264                                             Spacing * 2));
7265     TmpInst.addOperand(Inst.getOperand(1)); // lane
7266     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7267     TmpInst.addOperand(Inst.getOperand(5));
7268     Inst = TmpInst;
7269     return true;
7270   }
7271 
7272   case ARM::VST4LNdWB_fixed_Asm_8:
7273   case ARM::VST4LNdWB_fixed_Asm_16:
7274   case ARM::VST4LNdWB_fixed_Asm_32:
7275   case ARM::VST4LNqWB_fixed_Asm_16:
7276   case ARM::VST4LNqWB_fixed_Asm_32: {
7277     MCInst TmpInst;
7278     // Shuffle the operands around so the lane index operand is in the
7279     // right place.
7280     unsigned Spacing;
7281     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7282     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7283     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7284     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7285     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7286     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7287     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7288                                             Spacing));
7289     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7290                                             Spacing * 2));
7291     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7292                                             Spacing * 3));
7293     TmpInst.addOperand(Inst.getOperand(1)); // lane
7294     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7295     TmpInst.addOperand(Inst.getOperand(5));
7296     Inst = TmpInst;
7297     return true;
7298   }
7299 
7300   case ARM::VST1LNdAsm_8:
7301   case ARM::VST1LNdAsm_16:
7302   case ARM::VST1LNdAsm_32: {
7303     MCInst TmpInst;
7304     // Shuffle the operands around so the lane index operand is in the
7305     // right place.
7306     unsigned Spacing;
7307     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7308     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7309     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7310     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7311     TmpInst.addOperand(Inst.getOperand(1)); // lane
7312     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7313     TmpInst.addOperand(Inst.getOperand(5));
7314     Inst = TmpInst;
7315     return true;
7316   }
7317 
7318   case ARM::VST2LNdAsm_8:
7319   case ARM::VST2LNdAsm_16:
7320   case ARM::VST2LNdAsm_32:
7321   case ARM::VST2LNqAsm_16:
7322   case ARM::VST2LNqAsm_32: {
7323     MCInst TmpInst;
7324     // Shuffle the operands around so the lane index operand is in the
7325     // right place.
7326     unsigned Spacing;
7327     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7328     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7329     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7330     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7331     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7332                                             Spacing));
7333     TmpInst.addOperand(Inst.getOperand(1)); // lane
7334     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7335     TmpInst.addOperand(Inst.getOperand(5));
7336     Inst = TmpInst;
7337     return true;
7338   }
7339 
7340   case ARM::VST3LNdAsm_8:
7341   case ARM::VST3LNdAsm_16:
7342   case ARM::VST3LNdAsm_32:
7343   case ARM::VST3LNqAsm_16:
7344   case ARM::VST3LNqAsm_32: {
7345     MCInst TmpInst;
7346     // Shuffle the operands around so the lane index operand is in the
7347     // right place.
7348     unsigned Spacing;
7349     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7350     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7351     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7352     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7353     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7354                                             Spacing));
7355     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7356                                             Spacing * 2));
7357     TmpInst.addOperand(Inst.getOperand(1)); // lane
7358     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7359     TmpInst.addOperand(Inst.getOperand(5));
7360     Inst = TmpInst;
7361     return true;
7362   }
7363 
7364   case ARM::VST4LNdAsm_8:
7365   case ARM::VST4LNdAsm_16:
7366   case ARM::VST4LNdAsm_32:
7367   case ARM::VST4LNqAsm_16:
7368   case ARM::VST4LNqAsm_32: {
7369     MCInst TmpInst;
7370     // Shuffle the operands around so the lane index operand is in the
7371     // right place.
7372     unsigned Spacing;
7373     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
7374     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7375     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7376     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7377     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7378                                             Spacing));
7379     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7380                                             Spacing * 2));
7381     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7382                                             Spacing * 3));
7383     TmpInst.addOperand(Inst.getOperand(1)); // lane
7384     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7385     TmpInst.addOperand(Inst.getOperand(5));
7386     Inst = TmpInst;
7387     return true;
7388   }
7389 
7390   // Handle NEON VLD complex aliases.
7391   case ARM::VLD1LNdWB_register_Asm_8:
7392   case ARM::VLD1LNdWB_register_Asm_16:
7393   case ARM::VLD1LNdWB_register_Asm_32: {
7394     MCInst TmpInst;
7395     // Shuffle the operands around so the lane index operand is in the
7396     // right place.
7397     unsigned Spacing;
7398     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7399     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7400     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7401     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7402     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7403     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7404     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7405     TmpInst.addOperand(Inst.getOperand(1)); // lane
7406     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7407     TmpInst.addOperand(Inst.getOperand(6));
7408     Inst = TmpInst;
7409     return true;
7410   }
7411 
7412   case ARM::VLD2LNdWB_register_Asm_8:
7413   case ARM::VLD2LNdWB_register_Asm_16:
7414   case ARM::VLD2LNdWB_register_Asm_32:
7415   case ARM::VLD2LNqWB_register_Asm_16:
7416   case ARM::VLD2LNqWB_register_Asm_32: {
7417     MCInst TmpInst;
7418     // Shuffle the operands around so the lane index operand is in the
7419     // right place.
7420     unsigned Spacing;
7421     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7422     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7423     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7424                                             Spacing));
7425     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7426     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7427     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7428     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7429     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7430     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7431                                             Spacing));
7432     TmpInst.addOperand(Inst.getOperand(1)); // lane
7433     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7434     TmpInst.addOperand(Inst.getOperand(6));
7435     Inst = TmpInst;
7436     return true;
7437   }
7438 
7439   case ARM::VLD3LNdWB_register_Asm_8:
7440   case ARM::VLD3LNdWB_register_Asm_16:
7441   case ARM::VLD3LNdWB_register_Asm_32:
7442   case ARM::VLD3LNqWB_register_Asm_16:
7443   case ARM::VLD3LNqWB_register_Asm_32: {
7444     MCInst TmpInst;
7445     // Shuffle the operands around so the lane index operand is in the
7446     // right place.
7447     unsigned Spacing;
7448     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7449     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7450     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7451                                             Spacing));
7452     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7453                                             Spacing * 2));
7454     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7455     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7456     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7457     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7458     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7459     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7460                                             Spacing));
7461     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7462                                             Spacing * 2));
7463     TmpInst.addOperand(Inst.getOperand(1)); // lane
7464     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7465     TmpInst.addOperand(Inst.getOperand(6));
7466     Inst = TmpInst;
7467     return true;
7468   }
7469 
7470   case ARM::VLD4LNdWB_register_Asm_8:
7471   case ARM::VLD4LNdWB_register_Asm_16:
7472   case ARM::VLD4LNdWB_register_Asm_32:
7473   case ARM::VLD4LNqWB_register_Asm_16:
7474   case ARM::VLD4LNqWB_register_Asm_32: {
7475     MCInst TmpInst;
7476     // Shuffle the operands around so the lane index operand is in the
7477     // right place.
7478     unsigned Spacing;
7479     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7480     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7481     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7482                                             Spacing));
7483     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7484                                             Spacing * 2));
7485     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7486                                             Spacing * 3));
7487     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7488     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7489     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7490     TmpInst.addOperand(Inst.getOperand(4)); // Rm
7491     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7492     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7493                                             Spacing));
7494     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7495                                             Spacing * 2));
7496     TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
7497                                             Spacing * 3));
7498     TmpInst.addOperand(Inst.getOperand(1)); // lane
7499     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
7500     TmpInst.addOperand(Inst.getOperand(6));
7501     Inst = TmpInst;
7502     return true;
7503   }
7504 
7505   case ARM::VLD1LNdWB_fixed_Asm_8:
7506   case ARM::VLD1LNdWB_fixed_Asm_16:
7507   case ARM::VLD1LNdWB_fixed_Asm_32: {
7508     MCInst TmpInst;
7509     // Shuffle the operands around so the lane index operand is in the
7510     // right place.
7511     unsigned Spacing;
7512     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
7513     TmpInst.addOperand(Inst.getOperand(0)); // Vd
7514     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
7515     TmpInst.addOperand(Inst.getOperand(2)); // Rn
7516     TmpInst.addOperand(Inst.getOperand(3)); // alignment
7517     TmpInst.addOperand(MCOperand::createReg(0)); // Rm
7518     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
7519     TmpInst.addOperand(Inst.getOperand(1)); // lane
7520     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
7521     TmpInst.addOperand(Inst.getOperand(5));
7522     Inst = TmpInst;
7523     return true;
7524   }
7525 
  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    // Expand the two-lane VLD2 pseudo with fixed-stride writeback. The
    // pseudo carries only the first list register (Vd); the second is
    // derived as Vd + Spacing (enum arithmetic -- assumes consecutively
    // numbered D registers; Spacing comes from getRealVLDOpcode).
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied src Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7552 
  case ARM::VLD3LNdWB_fixed_Asm_8:
  case ARM::VLD3LNdWB_fixed_Asm_16:
  case ARM::VLD3LNdWB_fixed_Asm_32:
  case ARM::VLD3LNqWB_fixed_Asm_16:
  case ARM::VLD3LNqWB_fixed_Asm_32: {
    // Expand the three-lane VLD3 pseudo with fixed-stride writeback:
    // register list is {Vd, Vd+Spacing, Vd+2*Spacing}, each also repeated
    // as a tied source, and the lane index moves after the tied sources.
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7583 
  case ARM::VLD4LNdWB_fixed_Asm_8:
  case ARM::VLD4LNdWB_fixed_Asm_16:
  case ARM::VLD4LNdWB_fixed_Asm_32:
  case ARM::VLD4LNqWB_fixed_Asm_16:
  case ARM::VLD4LNqWB_fixed_Asm_32: {
    // Expand the four-lane VLD4 pseudo with fixed-stride writeback:
    // register list is {Vd .. Vd+3*Spacing}, each repeated as a tied
    // source, with the lane index placed after the tied sources.
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7618 
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    // Expand the one-lane VLD1 pseudo (no writeback).
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7637 
  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    // Expand the two-lane VLD2 pseudo (no writeback): second list register
    // is Vd + Spacing (assumes consecutive D-register enum numbering).
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied src Vd+Spacing
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7662 
  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    // Expand the three-lane VLD3 pseudo (no writeback): list is
    // {Vd, Vd+Spacing, Vd+2*Spacing}, each repeated as a tied source.
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7691 
  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    // Expand the four-lane VLD4 pseudo (no writeback): list is
    // {Vd .. Vd+3*Spacing}, each repeated as a tied source.
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7724 
  // VLD3DUP single 3-element structure to all lanes instructions.
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    // Expand the VLD3DUP pseudo (no writeback): materialize the full
    // 3-register list {Vd, Vd+Spacing, Vd+2*Spacing} from the single Vd
    // operand. No tied-source or lane operands for all-lanes loads.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7747 
  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    // Expand the VLD3DUP pseudo with fixed-stride post-increment
    // writeback ("... [Rn]!"); Rm is 0 (no register) for this form.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7771 
  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    // Expand the VLD3DUP pseudo with register writeback ("... [Rn], Rm"):
    // same as the fixed form but Rm comes from the parsed operands.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7795 
  // VLD3 multiple 3-element structure instructions.
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    // Expand the VLD3 pseudo (no writeback): build the 3-register list
    // from Vd and Spacing.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7818 
  case ARM::VLD3dWB_fixed_Asm_8:
  case ARM::VLD3dWB_fixed_Asm_16:
  case ARM::VLD3dWB_fixed_Asm_32:
  case ARM::VLD3qWB_fixed_Asm_8:
  case ARM::VLD3qWB_fixed_Asm_16:
  case ARM::VLD3qWB_fixed_Asm_32: {
    // Expand the VLD3 pseudo with fixed-stride post-increment writeback.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7842 
  case ARM::VLD3dWB_register_Asm_8:
  case ARM::VLD3dWB_register_Asm_16:
  case ARM::VLD3dWB_register_Asm_32:
  case ARM::VLD3qWB_register_Asm_8:
  case ARM::VLD3qWB_register_Asm_16:
  case ARM::VLD3qWB_register_Asm_32: {
    // Expand the VLD3 pseudo with register writeback ("... [Rn], Rm").
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7866 
  // VLD4DUP single 4-element structure to all lanes instructions.
  case ARM::VLD4DUPdAsm_8:
  case ARM::VLD4DUPdAsm_16:
  case ARM::VLD4DUPdAsm_32:
  case ARM::VLD4DUPqAsm_8:
  case ARM::VLD4DUPqAsm_16:
  case ARM::VLD4DUPqAsm_32: {
    // Expand the VLD4DUP pseudo (no writeback): build the 4-register
    // list {Vd .. Vd+3*Spacing} from the single Vd operand.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7891 
  case ARM::VLD4DUPdWB_fixed_Asm_8:
  case ARM::VLD4DUPdWB_fixed_Asm_16:
  case ARM::VLD4DUPdWB_fixed_Asm_32:
  case ARM::VLD4DUPqWB_fixed_Asm_8:
  case ARM::VLD4DUPqWB_fixed_Asm_16:
  case ARM::VLD4DUPqWB_fixed_Asm_32: {
    // Expand the VLD4DUP pseudo with fixed-stride post-increment writeback.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7917 
  case ARM::VLD4DUPdWB_register_Asm_8:
  case ARM::VLD4DUPdWB_register_Asm_16:
  case ARM::VLD4DUPdWB_register_Asm_32:
  case ARM::VLD4DUPqWB_register_Asm_8:
  case ARM::VLD4DUPqWB_register_Asm_16:
  case ARM::VLD4DUPqWB_register_Asm_32: {
    // Expand the VLD4DUP pseudo with register writeback ("... [Rn], Rm").
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
7943 
  // VLD4 multiple 4-element structure instructions.
  case ARM::VLD4dAsm_8:
  case ARM::VLD4dAsm_16:
  case ARM::VLD4dAsm_32:
  case ARM::VLD4qAsm_8:
  case ARM::VLD4qAsm_16:
  case ARM::VLD4qAsm_32: {
    // Expand the VLD4 pseudo (no writeback): build the 4-register list
    // {Vd .. Vd+3*Spacing} from Vd.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7968 
  case ARM::VLD4dWB_fixed_Asm_8:
  case ARM::VLD4dWB_fixed_Asm_16:
  case ARM::VLD4dWB_fixed_Asm_32:
  case ARM::VLD4qWB_fixed_Asm_8:
  case ARM::VLD4qWB_fixed_Asm_16:
  case ARM::VLD4qWB_fixed_Asm_32: {
    // Expand the VLD4 pseudo with fixed-stride post-increment writeback.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
7994 
  case ARM::VLD4dWB_register_Asm_8:
  case ARM::VLD4dWB_register_Asm_16:
  case ARM::VLD4dWB_register_Asm_32:
  case ARM::VLD4qWB_register_Asm_8:
  case ARM::VLD4qWB_register_Asm_16:
  case ARM::VLD4qWB_register_Asm_32: {
    // Expand the VLD4 pseudo with register writeback ("... [Rn], Rm").
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
8020 
  // VST3 multiple 3-element structure instructions.
  case ARM::VST3dAsm_8:
  case ARM::VST3dAsm_16:
  case ARM::VST3dAsm_32:
  case ARM::VST3qAsm_8:
  case ARM::VST3qAsm_16:
  case ARM::VST3qAsm_32: {
    // Expand the VST3 pseudo (no writeback). Unlike the loads, stores
    // put the address operands first and the register list after them.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
8043 
  case ARM::VST3dWB_fixed_Asm_8:
  case ARM::VST3dWB_fixed_Asm_16:
  case ARM::VST3dWB_fixed_Asm_32:
  case ARM::VST3qWB_fixed_Asm_8:
  case ARM::VST3qWB_fixed_Asm_16:
  case ARM::VST3qWB_fixed_Asm_32: {
    // Expand the VST3 pseudo with fixed-stride post-increment writeback.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
8067 
  case ARM::VST3dWB_register_Asm_8:
  case ARM::VST3dWB_register_Asm_16:
  case ARM::VST3dWB_register_Asm_32:
  case ARM::VST3qWB_register_Asm_8:
  case ARM::VST3qWB_register_Asm_16:
  case ARM::VST3qWB_register_Asm_32: {
    // Expand the VST3 pseudo with register writeback ("... [Rn], Rm").
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
8091 
  // VST4 multiple 4-element structure instructions.
  case ARM::VST4dAsm_8:
  case ARM::VST4dAsm_16:
  case ARM::VST4dAsm_32:
  case ARM::VST4qAsm_8:
  case ARM::VST4qAsm_16:
  case ARM::VST4qAsm_32: {
    // Expand the VST4 pseudo (no writeback): address operands first,
    // then the 4-register list {Vd .. Vd+3*Spacing}.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
8116 
  case ARM::VST4dWB_fixed_Asm_8:
  case ARM::VST4dWB_fixed_Asm_16:
  case ARM::VST4dWB_fixed_Asm_32:
  case ARM::VST4qWB_fixed_Asm_8:
  case ARM::VST4qWB_fixed_Asm_16:
  case ARM::VST4qWB_fixed_Asm_32: {
    // Expand the VST4 pseudo with fixed-stride post-increment writeback.
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::createReg(0)); // Rm (0: no register)
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4)); // predicate register
    Inst = TmpInst;
    return true;
  }
8142 
  case ARM::VST4dWB_register_Asm_8:
  case ARM::VST4dWB_register_Asm_16:
  case ARM::VST4dWB_register_Asm_32:
  case ARM::VST4qWB_register_Asm_8:
  case ARM::VST4qWB_register_Asm_16:
  case ARM::VST4qWB_register_Asm_32: {
    // Expand the VST4 pseudo with register writeback ("... [Rn], Rm").
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    Inst = TmpInst;
    return true;
  }
8168 
  // Handle encoding choice for the shift-immediate instructions.
  case ARM::t2LSLri:
  case ARM::t2LSRri:
  case ARM::t2ASRri:
    // Narrow a 32-bit Thumb2 shift-immediate to the 16-bit Thumb1
    // encoding when possible: both registers low, no ".w" qualifier, and
    // the S-bit matches what the Thumb1 form produces (Thumb1 shifts set
    // flags outside an IT block and leave them alone inside one, hence
    // the CCOut operand must be CPSR outside IT / 0 inside -- presumably
    // operand 5 is the cc_out operand; layout per the t2 shift defs).
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
        !HasWideQualifier) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("unexpected opcode");
      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
      }
      // The Thumb1 operands aren't in the same order. Awesome, eh?
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0)); // Rd
      TmpInst.addOperand(Inst.getOperand(5)); // CCOut moves up front
      TmpInst.addOperand(Inst.getOperand(1)); // Rm (source)
      TmpInst.addOperand(Inst.getOperand(2)); // shift amount
      TmpInst.addOperand(Inst.getOperand(3)); // CondCode
      TmpInst.addOperand(Inst.getOperand(4)); // predicate register
      Inst = TmpInst;
      return true;
    }
    // Keep the 32-bit encoding as-is.
    return false;
8197 
  // Handle the Thumb2 mode MOV complex aliases.
  case ARM::t2MOVsr:
  case ARM::t2MOVSsr: {
    // Expand "mov(s) Rd, Rn, <shift> Rm" into the corresponding
    // register-shift instruction (ASR/LSR/LSL/ROR), choosing the 16-bit
    // Thumb1 encoding when legal.
    // Which instruction to expand to depends on the CCOut operand and
    // whether we're in an IT block if the register operands are low
    // registers.
    bool isNarrow = false;
    // Narrow form requires all-low registers, Rd == Rn (the Thumb1
    // encodings tie destination and first source), no ".w" qualifier,
    // and flag-setting consistent with IT-block placement (the Thumb1
    // forms set flags only outside an IT block).
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        isARMLowRegister(Inst.getOperand(2).getReg()) &&
        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) &&
        !HasWideQualifier)
      isNarrow = true;
    MCInst TmpInst;
    unsigned newOpc;
    // Operand 3 encodes the shift kind (so_reg shift opcode immediate).
    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
    }
    TmpInst.setOpcode(newOpc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    // CCOut position differs: narrow encodings carry it right after Rd,
    // wide encodings append it at the end. CPSR iff this was the
    // flag-setting (MOVS) alias.
    if (isNarrow)
      TmpInst.addOperand(MCOperand::createReg(
          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5)); // predicate register
    if (!isNarrow)
      TmpInst.addOperand(MCOperand::createReg(
          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
    Inst = TmpInst;
    return true;
  }
8236   case ARM::t2MOVsi:
8237   case ARM::t2MOVSsi: {
8238     // Which instruction to expand to depends on the CCOut operand and
8239     // whether we're in an IT block if the register operands are low
8240     // registers.
8241     bool isNarrow = false;
8242     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8243         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8244         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) &&
8245         !HasWideQualifier)
8246       isNarrow = true;
8247     MCInst TmpInst;
8248     unsigned newOpc;
8249     unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8250     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
8251     bool isMov = false;
8252     // MOV rd, rm, LSL #0 is actually a MOV instruction
8253     if (Shift == ARM_AM::lsl && Amount == 0) {
8254       isMov = true;
8255       // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of
8256       // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is
8257       // unpredictable in an IT block so the 32-bit encoding T3 has to be used
8258       // instead.
8259       if (inITBlock()) {
8260         isNarrow = false;
8261       }
8262       newOpc = isNarrow ? ARM::tMOVSr : ARM::t2MOVr;
8263     } else {
8264       switch(Shift) {
8265       default: llvm_unreachable("unexpected opcode!");
8266       case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
8267       case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
8268       case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
8269       case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
8270       case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
8271       }
8272     }
8273     if (Amount == 32) Amount = 0;
8274     TmpInst.setOpcode(newOpc);
8275     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8276     if (isNarrow && !isMov)
8277       TmpInst.addOperand(MCOperand::createReg(
8278           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8279     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8280     if (newOpc != ARM::t2RRX && !isMov)
8281       TmpInst.addOperand(MCOperand::createImm(Amount));
8282     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8283     TmpInst.addOperand(Inst.getOperand(4));
8284     if (!isNarrow)
8285       TmpInst.addOperand(MCOperand::createReg(
8286           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
8287     Inst = TmpInst;
8288     return true;
8289   }
8290   // Handle the ARM mode MOV complex aliases.
8291   case ARM::ASRr:
8292   case ARM::LSRr:
8293   case ARM::LSLr:
8294   case ARM::RORr: {
8295     ARM_AM::ShiftOpc ShiftTy;
8296     switch(Inst.getOpcode()) {
8297     default: llvm_unreachable("unexpected opcode!");
8298     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
8299     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
8300     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
8301     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
8302     }
8303     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
8304     MCInst TmpInst;
8305     TmpInst.setOpcode(ARM::MOVsr);
8306     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8307     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8308     TmpInst.addOperand(Inst.getOperand(2)); // Rm
8309     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8310     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8311     TmpInst.addOperand(Inst.getOperand(4));
8312     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8313     Inst = TmpInst;
8314     return true;
8315   }
8316   case ARM::ASRi:
8317   case ARM::LSRi:
8318   case ARM::LSLi:
8319   case ARM::RORi: {
8320     ARM_AM::ShiftOpc ShiftTy;
8321     switch(Inst.getOpcode()) {
8322     default: llvm_unreachable("unexpected opcode!");
8323     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
8324     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
8325     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
8326     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
8327     }
8328     // A shift by zero is a plain MOVr, not a MOVsi.
8329     unsigned Amt = Inst.getOperand(2).getImm();
8330     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
8331     // A shift by 32 should be encoded as 0 when permitted
8332     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
8333       Amt = 0;
8334     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
8335     MCInst TmpInst;
8336     TmpInst.setOpcode(Opc);
8337     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8338     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8339     if (Opc == ARM::MOVsi)
8340       TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8341     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
8342     TmpInst.addOperand(Inst.getOperand(4));
8343     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
8344     Inst = TmpInst;
8345     return true;
8346   }
8347   case ARM::RRXi: {
8348     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
8349     MCInst TmpInst;
8350     TmpInst.setOpcode(ARM::MOVsi);
8351     TmpInst.addOperand(Inst.getOperand(0)); // Rd
8352     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8353     TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty
8354     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8355     TmpInst.addOperand(Inst.getOperand(3));
8356     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
8357     Inst = TmpInst;
8358     return true;
8359   }
8360   case ARM::t2LDMIA_UPD: {
8361     // If this is a load of a single register, then we should use
8362     // a post-indexed LDR instruction instead, per the ARM ARM.
8363     if (Inst.getNumOperands() != 5)
8364       return false;
8365     MCInst TmpInst;
8366     TmpInst.setOpcode(ARM::t2LDR_POST);
8367     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8368     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8369     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8370     TmpInst.addOperand(MCOperand::createImm(4));
8371     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8372     TmpInst.addOperand(Inst.getOperand(3));
8373     Inst = TmpInst;
8374     return true;
8375   }
8376   case ARM::t2STMDB_UPD: {
8377     // If this is a store of a single register, then we should use
8378     // a pre-indexed STR instruction instead, per the ARM ARM.
8379     if (Inst.getNumOperands() != 5)
8380       return false;
8381     MCInst TmpInst;
8382     TmpInst.setOpcode(ARM::t2STR_PRE);
8383     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8384     TmpInst.addOperand(Inst.getOperand(4)); // Rt
8385     TmpInst.addOperand(Inst.getOperand(1)); // Rn
8386     TmpInst.addOperand(MCOperand::createImm(-4));
8387     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8388     TmpInst.addOperand(Inst.getOperand(3));
8389     Inst = TmpInst;
8390     return true;
8391   }
8392   case ARM::LDMIA_UPD:
8393     // If this is a load of a single register via a 'pop', then we should use
8394     // a post-indexed LDR instruction instead, per the ARM ARM.
8395     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" &&
8396         Inst.getNumOperands() == 5) {
8397       MCInst TmpInst;
8398       TmpInst.setOpcode(ARM::LDR_POST_IMM);
8399       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8400       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8401       TmpInst.addOperand(Inst.getOperand(1)); // Rn
8402       TmpInst.addOperand(MCOperand::createReg(0));  // am2offset
8403       TmpInst.addOperand(MCOperand::createImm(4));
8404       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8405       TmpInst.addOperand(Inst.getOperand(3));
8406       Inst = TmpInst;
8407       return true;
8408     }
8409     break;
8410   case ARM::STMDB_UPD:
8411     // If this is a store of a single register via a 'push', then we should use
8412     // a pre-indexed STR instruction instead, per the ARM ARM.
8413     if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" &&
8414         Inst.getNumOperands() == 5) {
8415       MCInst TmpInst;
8416       TmpInst.setOpcode(ARM::STR_PRE_IMM);
8417       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
8418       TmpInst.addOperand(Inst.getOperand(4)); // Rt
8419       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
8420       TmpInst.addOperand(MCOperand::createImm(-4));
8421       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
8422       TmpInst.addOperand(Inst.getOperand(3));
8423       Inst = TmpInst;
8424     }
8425     break;
8426   case ARM::t2ADDri12:
8427     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
8428     // mnemonic was used (not "addw"), encoding T3 is preferred.
8429     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" ||
8430         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8431       break;
8432     Inst.setOpcode(ARM::t2ADDri);
8433     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8434     break;
8435   case ARM::t2SUBri12:
8436     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
8437     // mnemonic was used (not "subw"), encoding T3 is preferred.
8438     if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" ||
8439         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
8440       break;
8441     Inst.setOpcode(ARM::t2SUBri);
8442     Inst.addOperand(MCOperand::createReg(0)); // cc_out
8443     break;
8444   case ARM::tADDi8:
8445     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8446     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8447     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8448     // to encoding T1 if <Rd> is omitted."
8449     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8450       Inst.setOpcode(ARM::tADDi3);
8451       return true;
8452     }
8453     break;
8454   case ARM::tSUBi8:
8455     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
8456     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
8457     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
8458     // to encoding T1 if <Rd> is omitted."
8459     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
8460       Inst.setOpcode(ARM::tSUBi3);
8461       return true;
8462     }
8463     break;
8464   case ARM::t2ADDri:
8465   case ARM::t2SUBri: {
8466     // If the destination and first source operand are the same, and
8467     // the flags are compatible with the current IT status, use encoding T2
8468     // instead of T3. For compatibility with the system 'as'. Make sure the
8469     // wide encoding wasn't explicit.
8470     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
8471         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
8472         (Inst.getOperand(2).isImm() &&
8473          (unsigned)Inst.getOperand(2).getImm() > 255) ||
8474         Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) ||
8475         HasWideQualifier)
8476       break;
8477     MCInst TmpInst;
8478     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
8479                       ARM::tADDi8 : ARM::tSUBi8);
8480     TmpInst.addOperand(Inst.getOperand(0));
8481     TmpInst.addOperand(Inst.getOperand(5));
8482     TmpInst.addOperand(Inst.getOperand(0));
8483     TmpInst.addOperand(Inst.getOperand(2));
8484     TmpInst.addOperand(Inst.getOperand(3));
8485     TmpInst.addOperand(Inst.getOperand(4));
8486     Inst = TmpInst;
8487     return true;
8488   }
8489   case ARM::t2ADDrr: {
8490     // If the destination and first source operand are the same, and
8491     // there's no setting of the flags, use encoding T2 instead of T3.
8492     // Note that this is only for ADD, not SUB. This mirrors the system
8493     // 'as' behaviour.  Also take advantage of ADD being commutative.
8494     // Make sure the wide encoding wasn't explicit.
8495     bool Swap = false;
8496     auto DestReg = Inst.getOperand(0).getReg();
8497     bool Transform = DestReg == Inst.getOperand(1).getReg();
8498     if (!Transform && DestReg == Inst.getOperand(2).getReg()) {
8499       Transform = true;
8500       Swap = true;
8501     }
8502     if (!Transform ||
8503         Inst.getOperand(5).getReg() != 0 ||
8504         HasWideQualifier)
8505       break;
8506     MCInst TmpInst;
8507     TmpInst.setOpcode(ARM::tADDhirr);
8508     TmpInst.addOperand(Inst.getOperand(0));
8509     TmpInst.addOperand(Inst.getOperand(0));
8510     TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2));
8511     TmpInst.addOperand(Inst.getOperand(3));
8512     TmpInst.addOperand(Inst.getOperand(4));
8513     Inst = TmpInst;
8514     return true;
8515   }
8516   case ARM::tADDrSP:
8517     // If the non-SP source operand and the destination operand are not the
8518     // same, we need to use the 32-bit encoding if it's available.
8519     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
8520       Inst.setOpcode(ARM::t2ADDrr);
8521       Inst.addOperand(MCOperand::createReg(0)); // cc_out
8522       return true;
8523     }
8524     break;
8525   case ARM::tB:
8526     // A Thumb conditional branch outside of an IT block is a tBcc.
8527     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
8528       Inst.setOpcode(ARM::tBcc);
8529       return true;
8530     }
8531     break;
8532   case ARM::t2B:
8533     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
8534     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
8535       Inst.setOpcode(ARM::t2Bcc);
8536       return true;
8537     }
8538     break;
8539   case ARM::t2Bcc:
8540     // If the conditional is AL or we're in an IT block, we really want t2B.
8541     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
8542       Inst.setOpcode(ARM::t2B);
8543       return true;
8544     }
8545     break;
8546   case ARM::tBcc:
8547     // If the conditional is AL, we really want tB.
8548     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
8549       Inst.setOpcode(ARM::tB);
8550       return true;
8551     }
8552     break;
8553   case ARM::tLDMIA: {
8554     // If the register list contains any high registers, or if the writeback
8555     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
8556     // instead if we're in Thumb2. Otherwise, this should have generated
8557     // an error in validateInstruction().
8558     unsigned Rn = Inst.getOperand(0).getReg();
8559     bool hasWritebackToken =
8560         (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
8561          static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
8562     bool listContainsBase;
8563     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
8564         (!listContainsBase && !hasWritebackToken) ||
8565         (listContainsBase && hasWritebackToken)) {
8566       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8567       assert(isThumbTwo());
8568       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
8569       // If we're switching to the updating version, we need to insert
8570       // the writeback tied operand.
8571       if (hasWritebackToken)
8572         Inst.insert(Inst.begin(),
8573                     MCOperand::createReg(Inst.getOperand(0).getReg()));
8574       return true;
8575     }
8576     break;
8577   }
8578   case ARM::tSTMIA_UPD: {
8579     // If the register list contains any high registers, we need to use
8580     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8581     // should have generated an error in validateInstruction().
8582     unsigned Rn = Inst.getOperand(0).getReg();
8583     bool listContainsBase;
8584     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
8585       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
8586       assert(isThumbTwo());
8587       Inst.setOpcode(ARM::t2STMIA_UPD);
8588       return true;
8589     }
8590     break;
8591   }
8592   case ARM::tPOP: {
8593     bool listContainsBase;
8594     // If the register list contains any high registers, we need to use
8595     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
8596     // should have generated an error in validateInstruction().
8597     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
8598       return false;
8599     assert(isThumbTwo());
8600     Inst.setOpcode(ARM::t2LDMIA_UPD);
8601     // Add the base register and writeback operands.
8602     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8603     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8604     return true;
8605   }
8606   case ARM::tPUSH: {
8607     bool listContainsBase;
8608     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
8609       return false;
8610     assert(isThumbTwo());
8611     Inst.setOpcode(ARM::t2STMDB_UPD);
8612     // Add the base register and writeback operands.
8613     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8614     Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP));
8615     return true;
8616   }
8617   case ARM::t2MOVi:
8618     // If we can use the 16-bit encoding and the user didn't explicitly
8619     // request the 32-bit variant, transform it here.
8620     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8621         (Inst.getOperand(1).isImm() &&
8622          (unsigned)Inst.getOperand(1).getImm() <= 255) &&
8623         Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8624         !HasWideQualifier) {
8625       // The operands aren't in the same order for tMOVi8...
8626       MCInst TmpInst;
8627       TmpInst.setOpcode(ARM::tMOVi8);
8628       TmpInst.addOperand(Inst.getOperand(0));
8629       TmpInst.addOperand(Inst.getOperand(4));
8630       TmpInst.addOperand(Inst.getOperand(1));
8631       TmpInst.addOperand(Inst.getOperand(2));
8632       TmpInst.addOperand(Inst.getOperand(3));
8633       Inst = TmpInst;
8634       return true;
8635     }
8636     break;
8637 
8638   case ARM::t2MOVr:
8639     // If we can use the 16-bit encoding and the user didn't explicitly
8640     // request the 32-bit variant, transform it here.
8641     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8642         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8643         Inst.getOperand(2).getImm() == ARMCC::AL &&
8644         Inst.getOperand(4).getReg() == ARM::CPSR &&
8645         !HasWideQualifier) {
8646       // The operands aren't the same for tMOV[S]r... (no cc_out)
8647       MCInst TmpInst;
8648       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
8649       TmpInst.addOperand(Inst.getOperand(0));
8650       TmpInst.addOperand(Inst.getOperand(1));
8651       TmpInst.addOperand(Inst.getOperand(2));
8652       TmpInst.addOperand(Inst.getOperand(3));
8653       Inst = TmpInst;
8654       return true;
8655     }
8656     break;
8657 
8658   case ARM::t2SXTH:
8659   case ARM::t2SXTB:
8660   case ARM::t2UXTH:
8661   case ARM::t2UXTB:
8662     // If we can use the 16-bit encoding and the user didn't explicitly
8663     // request the 32-bit variant, transform it here.
8664     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
8665         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8666         Inst.getOperand(2).getImm() == 0 &&
8667         !HasWideQualifier) {
8668       unsigned NewOpc;
8669       switch (Inst.getOpcode()) {
8670       default: llvm_unreachable("Illegal opcode!");
8671       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
8672       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
8673       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
8674       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
8675       }
8676       // The operands aren't the same for thumb1 (no rotate operand).
8677       MCInst TmpInst;
8678       TmpInst.setOpcode(NewOpc);
8679       TmpInst.addOperand(Inst.getOperand(0));
8680       TmpInst.addOperand(Inst.getOperand(1));
8681       TmpInst.addOperand(Inst.getOperand(3));
8682       TmpInst.addOperand(Inst.getOperand(4));
8683       Inst = TmpInst;
8684       return true;
8685     }
8686     break;
8687 
8688   case ARM::MOVsi: {
8689     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
8690     // rrx shifts and asr/lsr of #32 is encoded as 0
8691     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
8692       return false;
8693     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
8694       // Shifting by zero is accepted as a vanilla 'MOVr'
8695       MCInst TmpInst;
8696       TmpInst.setOpcode(ARM::MOVr);
8697       TmpInst.addOperand(Inst.getOperand(0));
8698       TmpInst.addOperand(Inst.getOperand(1));
8699       TmpInst.addOperand(Inst.getOperand(3));
8700       TmpInst.addOperand(Inst.getOperand(4));
8701       TmpInst.addOperand(Inst.getOperand(5));
8702       Inst = TmpInst;
8703       return true;
8704     }
8705     return false;
8706   }
8707   case ARM::ANDrsi:
8708   case ARM::ORRrsi:
8709   case ARM::EORrsi:
8710   case ARM::BICrsi:
8711   case ARM::SUBrsi:
8712   case ARM::ADDrsi: {
8713     unsigned newOpc;
8714     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
8715     if (SOpc == ARM_AM::rrx) return false;
8716     switch (Inst.getOpcode()) {
8717     default: llvm_unreachable("unexpected opcode!");
8718     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
8719     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
8720     case ARM::EORrsi: newOpc = ARM::EORrr; break;
8721     case ARM::BICrsi: newOpc = ARM::BICrr; break;
8722     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
8723     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
8724     }
8725     // If the shift is by zero, use the non-shifted instruction definition.
8726     // The exception is for right shifts, where 0 == 32
8727     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
8728         !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
8729       MCInst TmpInst;
8730       TmpInst.setOpcode(newOpc);
8731       TmpInst.addOperand(Inst.getOperand(0));
8732       TmpInst.addOperand(Inst.getOperand(1));
8733       TmpInst.addOperand(Inst.getOperand(2));
8734       TmpInst.addOperand(Inst.getOperand(4));
8735       TmpInst.addOperand(Inst.getOperand(5));
8736       TmpInst.addOperand(Inst.getOperand(6));
8737       Inst = TmpInst;
8738       return true;
8739     }
8740     return false;
8741   }
8742   case ARM::ITasm:
8743   case ARM::t2IT: {
8744     MCOperand &MO = Inst.getOperand(1);
8745     unsigned Mask = MO.getImm();
8746     ARMCC::CondCodes Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
8747 
8748     // Set up the IT block state according to the IT instruction we just
8749     // matched.
8750     assert(!inITBlock() && "nested IT blocks?!");
8751     startExplicitITBlock(Cond, Mask);
8752     MO.setImm(getITMaskEncoding());
8753     break;
8754   }
8755   case ARM::t2LSLrr:
8756   case ARM::t2LSRrr:
8757   case ARM::t2ASRrr:
8758   case ARM::t2SBCrr:
8759   case ARM::t2RORrr:
8760   case ARM::t2BICrr:
8761     // Assemblers should use the narrow encodings of these instructions when permissible.
8762     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8763          isARMLowRegister(Inst.getOperand(2).getReg())) &&
8764         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
8765         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8766         !HasWideQualifier) {
8767       unsigned NewOpc;
8768       switch (Inst.getOpcode()) {
8769         default: llvm_unreachable("unexpected opcode");
8770         case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
8771         case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
8772         case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
8773         case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
8774         case ARM::t2RORrr: NewOpc = ARM::tROR; break;
8775         case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
8776       }
8777       MCInst TmpInst;
8778       TmpInst.setOpcode(NewOpc);
8779       TmpInst.addOperand(Inst.getOperand(0));
8780       TmpInst.addOperand(Inst.getOperand(5));
8781       TmpInst.addOperand(Inst.getOperand(1));
8782       TmpInst.addOperand(Inst.getOperand(2));
8783       TmpInst.addOperand(Inst.getOperand(3));
8784       TmpInst.addOperand(Inst.getOperand(4));
8785       Inst = TmpInst;
8786       return true;
8787     }
8788     return false;
8789 
8790   case ARM::t2ANDrr:
8791   case ARM::t2EORrr:
8792   case ARM::t2ADCrr:
8793   case ARM::t2ORRrr:
8794     // Assemblers should use the narrow encodings of these instructions when permissible.
8795     // These instructions are special in that they are commutable, so shorter encodings
8796     // are available more often.
8797     if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
8798          isARMLowRegister(Inst.getOperand(2).getReg())) &&
8799         (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
8800          Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
8801         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
8802         !HasWideQualifier) {
8803       unsigned NewOpc;
8804       switch (Inst.getOpcode()) {
8805         default: llvm_unreachable("unexpected opcode");
8806         case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
8807         case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
8808         case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
8809         case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
8810       }
8811       MCInst TmpInst;
8812       TmpInst.setOpcode(NewOpc);
8813       TmpInst.addOperand(Inst.getOperand(0));
8814       TmpInst.addOperand(Inst.getOperand(5));
8815       if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
8816         TmpInst.addOperand(Inst.getOperand(1));
8817         TmpInst.addOperand(Inst.getOperand(2));
8818       } else {
8819         TmpInst.addOperand(Inst.getOperand(2));
8820         TmpInst.addOperand(Inst.getOperand(1));
8821       }
8822       TmpInst.addOperand(Inst.getOperand(3));
8823       TmpInst.addOperand(Inst.getOperand(4));
8824       Inst = TmpInst;
8825       return true;
8826     }
8827     return false;
8828   }
8829   return false;
8830 }
8831 
8832 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
8833   // 16-bit thumb arithmetic instructions either require or preclude the 'S'
8834   // suffix depending on whether they're in an IT block or not.
8835   unsigned Opc = Inst.getOpcode();
8836   const MCInstrDesc &MCID = MII.get(Opc);
8837   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
8838     assert(MCID.hasOptionalDef() &&
8839            "optionally flag setting instruction missing optional def operand");
8840     assert(MCID.NumOperands == Inst.getNumOperands() &&
8841            "operand count mismatch!");
8842     // Find the optional-def operand (cc_out).
8843     unsigned OpNo;
8844     for (OpNo = 0;
8845          !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
8846          ++OpNo)
8847       ;
8848     // If we're parsing Thumb1, reject it completely.
8849     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
8850       return Match_RequiresFlagSetting;
8851     // If we're parsing Thumb2, which form is legal depends on whether we're
8852     // in an IT block.
8853     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
8854         !inITBlock())
8855       return Match_RequiresITBlock;
8856     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
8857         inITBlock())
8858       return Match_RequiresNotITBlock;
8859     // LSL with zero immediate is not allowed in an IT block
8860     if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock())
8861       return Match_RequiresNotITBlock;
8862   } else if (isThumbOne()) {
8863     // Some high-register supporting Thumb1 encodings only allow both registers
8864     // to be from r0-r7 when in Thumb2.
8865     if (Opc == ARM::tADDhirr && !hasV6MOps() &&
8866         isARMLowRegister(Inst.getOperand(1).getReg()) &&
8867         isARMLowRegister(Inst.getOperand(2).getReg()))
8868       return Match_RequiresThumb2;
8869     // Others only require ARMv6 or later.
8870     else if (Opc == ARM::tMOVr && !hasV6Ops() &&
8871              isARMLowRegister(Inst.getOperand(0).getReg()) &&
8872              isARMLowRegister(Inst.getOperand(1).getReg()))
8873       return Match_RequiresV6;
8874   }
8875 
8876   // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex
8877   // than the loop below can handle, so it uses the GPRnopc register class and
8878   // we do SP handling here.
8879   if (Opc == ARM::t2MOVr && !hasV8Ops())
8880   {
8881     // SP as both source and destination is not allowed
8882     if (Inst.getOperand(0).getReg() == ARM::SP &&
8883         Inst.getOperand(1).getReg() == ARM::SP)
8884       return Match_RequiresV8;
8885     // When flags-setting SP as either source or destination is not allowed
8886     if (Inst.getOperand(4).getReg() == ARM::CPSR &&
8887         (Inst.getOperand(0).getReg() == ARM::SP ||
8888          Inst.getOperand(1).getReg() == ARM::SP))
8889       return Match_RequiresV8;
8890   }
8891 
8892   // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of
8893   // ARMv8-A.
8894   if ((Inst.getOpcode() == ARM::VMRS || Inst.getOpcode() == ARM::VMSR) &&
8895       Inst.getOperand(0).getReg() == ARM::SP && (isThumb() && !hasV8Ops()))
8896     return Match_InvalidOperand;
8897 
8898   for (unsigned I = 0; I < MCID.NumOperands; ++I)
8899     if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) {
8900       // rGPRRegClass excludes PC, and also excluded SP before ARMv8
8901       if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops())
8902         return Match_RequiresV8;
8903       else if (Inst.getOperand(I).getReg() == ARM::PC)
8904         return Match_InvalidOperand;
8905     }
8906 
8907   return Match_Success;
8908 }
8909 
8910 namespace llvm {
8911 
8912 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) {
8913   return true; // In an assembly source, no need to second-guess
8914 }
8915 
8916 } // end namespace llvm
8917 
// Returns true if Inst is unpredictable if it is in an IT block, but is not
// the last instruction in the block.
bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());

  // All branch & call instructions terminate IT blocks with the exception of
  // SVC.
  if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) ||
      MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch())
    return true;

  // Any arithmetic instruction which writes to the PC also terminates the IT
  // block. Only the defs (leading operands, per MCID.getNumDefs()) are
  // inspected here.
  for (unsigned OpIdx = 0; OpIdx < MCID.getNumDefs(); ++OpIdx) {
    MCOperand &Op = Inst.getOperand(OpIdx);
    if (Op.isReg() && Op.getReg() == ARM::PC)
      return true;
  }

  // Instructions whose descriptions list PC as an implicit def (e.g. pops
  // that return) are terminators as well.
  if (MCID.hasImplicitDefOfPhysReg(ARM::PC, MRI))
    return true;

  // Instructions with variable operand lists, which write to the variable
  // operands. We only care about Thumb instructions here, as ARM instructions
  // obviously can't be in an IT block.
  switch (Inst.getOpcode()) {
  case ARM::tLDMIA:
  case ARM::t2LDMIA:
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB:
  case ARM::t2LDMDB_UPD:
    // Check the register list (beginning at operand index 3) for PC.
    if (listContainsReg(Inst, 3, ARM::PC))
      return true;
    break;
  case ARM::tPOP:
    // For tPOP the register list begins at operand index 2.
    if (listContainsReg(Inst, 2, ARM::PC))
      return true;
    break;
  }

  return false;
}
8960 
// Match an instruction, handling implicit IT blocks in Thumb2 mode. Tries, in
// order: (1) extending the current implicit IT block, (2) matching outside
// any IT block, (3) opening a new implicit IT block. On success EmitInITBlock
// tells the caller whether the instruction must be queued as part of the
// (pending) implicit IT block rather than emitted directly.
unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst,
                                          SmallVectorImpl<NearMissInfo> &NearMisses,
                                          bool MatchingInlineAsm,
                                          bool &EmitInITBlock,
                                          MCStreamer &Out) {
  // If we can't use an implicit IT block here, just match as normal.
  if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
            Match_Success) {
      // The match succeeded, but we still have to check that the instruction is
      // valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
            (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          // Same condition as the block: emit as a 'T' slot.
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
          // Opposite condition: flip the current slot to an 'E' slot.
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // Extension failed; undo the speculative extendImplicitITBlock above.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        // Unconditional: no IT block needed.
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      // Adopt the instruction's own condition as the new block's condition.
      ITState.Cond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}
9045 
// Forward declaration: suggests a near-miss spelling for an unrecognized
// mnemonic (definition presumably supplied by the generated matcher include
// -- TODO confirm).
std::string ARMMnemonicSpellCheck(StringRef S, uint64_t FBS);

// Forward declaration: maps a subtarget feature bit to its printable name
// (definition presumably supplied by the generated matcher include).
static const char *getSubtargetFeatureName(uint64_t Val);
// Top-level match-and-emit entry point. Returns true on error (after emitting
// a diagnostic), false if the instruction was matched and emitted (or queued
// in a pending implicit IT block).
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out, uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  // Set by MatchInstruction when the instruction belongs to an implicit IT
  // block and must be queued rather than emitted immediately.
  bool PendConditionalInstruction = false;

  SmallVector<NearMissInfo, 4> NearMisses;
  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 PendConditionalInstruction, Out);

  switch (MatchResult) {
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    { // processInstruction() updates inITBlock state, we need to save it away
      bool wasInITBlock = inITBlock();

      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other. E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, Out))
        ;

      // Only after the instruction is fully processed, we can validate it
      if (wasInITBlock && hasV8Ops() && isThumb() &&
          !isV8EligibleForIT(&Inst)) {
        Warning(IDLoc, "deprecated instruction in IT block");
      }
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Queue inside the implicit IT block; emit the whole block when it is
      // full or this instruction must be its last slot.
      PendingConditionalInsts.push_back(Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.EmitInstruction(Inst, getSTI());
    }
    return false;
  case Match_NearMisses:
    // Report the closest failed match candidates collected by the matcher.
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion based on the features
    // available on this subtarget.
    uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
      ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}
9122 
/// parseDirective parses the arm specific directives
/// Returns false when the directive was recognized and handled here (even if
/// it produced a diagnostic); returns true to hand the directive back to the
/// generic parser.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCObjectFileInfo::Environment Format =
    getContext().getObjectFileInfo()->getObjectFileType();
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;

  StringRef IDVal = DirectiveID.getIdentifier();
  // Directives handled for every object-file format.
  if (IDVal == ".word")
    parseLiteralValues(4, DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(2, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(DirectiveID.getLoc(), false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(DirectiveID.getLoc(), true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(DirectiveID.getLoc());
  else if (!IsMachO && !IsCOFF) {
    // The directives below are only accepted for ELF-style output (neither
    // MachO nor COFF).
    if (IDVal == ".arch")
      parseDirectiveArch(DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(DirectiveID.getLoc());
    else if (IDVal == ".inst")
      parseDirectiveInst(DirectiveID.getLoc());
    else if (IDVal == ".inst.n")
      parseDirectiveInst(DirectiveID.getLoc(), 'n');
    else if (IDVal == ".inst.w")
      parseDirectiveInst(DirectiveID.getLoc(), 'w');
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(DirectiveID.getLoc());
    else
      return true;
  } else
    return true;
  return false;
}
9206 
9207 /// parseLiteralValues
9208 ///  ::= .hword expression [, expression]*
9209 ///  ::= .short expression [, expression]*
9210 ///  ::= .word expression [, expression]*
9211 bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
9212   auto parseOne = [&]() -> bool {
9213     const MCExpr *Value;
9214     if (getParser().parseExpression(Value))
9215       return true;
9216     getParser().getStreamer().EmitValue(Value, Size, L);
9217     return false;
9218   };
9219   return (parseMany(parseOne));
9220 }
9221 
9222 /// parseDirectiveThumb
9223 ///  ::= .thumb
9224 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
9225   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
9226       check(!hasThumb(), L, "target does not support Thumb mode"))
9227     return true;
9228 
9229   if (!isThumb())
9230     SwitchMode();
9231 
9232   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
9233   return false;
9234 }
9235 
9236 /// parseDirectiveARM
9237 ///  ::= .arm
9238 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
9239   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") ||
9240       check(!hasARM(), L, "target does not support ARM mode"))
9241     return true;
9242 
9243   if (isThumb())
9244     SwitchMode();
9245   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
9246   return false;
9247 }
9248 
9249 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) {
9250   // We need to flush the current implicit IT block on a label, because it is
9251   // not legal to branch into an IT block.
9252   flushPendingInstructions(getStreamer());
9253   if (NextSymbolIsThumb) {
9254     getParser().getStreamer().EmitThumbFunc(Symbol);
9255     NextSymbolIsThumb = false;
9256   }
9257 }
9258 
9259 /// parseDirectiveThumbFunc
9260 ///  ::= .thumbfunc symbol_name
9261 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
9262   MCAsmParser &Parser = getParser();
9263   const auto Format = getContext().getObjectFileInfo()->getObjectFileType();
9264   bool IsMachO = Format == MCObjectFileInfo::IsMachO;
9265 
9266   // Darwin asm has (optionally) function name after .thumb_func direction
9267   // ELF doesn't
9268 
9269   if (IsMachO) {
9270     if (Parser.getTok().is(AsmToken::Identifier) ||
9271         Parser.getTok().is(AsmToken::String)) {
9272       MCSymbol *Func = getParser().getContext().getOrCreateSymbol(
9273           Parser.getTok().getIdentifier());
9274       getParser().getStreamer().EmitThumbFunc(Func);
9275       Parser.Lex();
9276       if (parseToken(AsmToken::EndOfStatement,
9277                      "unexpected token in '.thumb_func' directive"))
9278         return true;
9279       return false;
9280     }
9281   }
9282 
9283   if (parseToken(AsmToken::EndOfStatement,
9284                  "unexpected token in '.thumb_func' directive"))
9285     return true;
9286 
9287   NextSymbolIsThumb = true;
9288   return false;
9289 }
9290 
/// parseDirectiveSyntax
///  ::= .syntax unified | divided
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) {
    Error(L, "unexpected token in .syntax directive");
    // NOTE(review): the error is reported but we return false ("handled"),
    // unlike most error paths in this file which return true -- confirm this
    // is intentional.
    return false;
  }

  StringRef Mode = Tok.getString();
  Parser.Lex();
  // Only unified syntax is supported; divided-syntax ARM assembly is
  // explicitly rejected.
  if (check(Mode == "divided" || Mode == "DIVIDED", L,
            "'.syntax divided' arm assembly not supported") ||
      check(Mode != "unified" && Mode != "UNIFIED", L,
            "unrecognized syntax mode in .syntax directive") ||
      parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;

  // TODO tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}
9314 
/// parseDirectiveCode
///  ::= .code 16 | 32
bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Integer))
    return Error(L, "unexpected token in .code directive");
  int64_t Val = Parser.getTok().getIntVal();
  if (Val != 16 && Val != 32) {
    Error(L, "invalid operand to .code directive");
    // NOTE(review): error reported but false ("handled") returned, unlike
    // most error paths here -- confirm this is intentional.
    return false;
  }
  Parser.Lex();

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;

  if (Val == 16) {
    // .code 16: switch to Thumb mode (must be supported by the target).
    if (!hasThumb())
      return Error(L, "target does not support Thumb mode");

    if (!isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  } else {
    // .code 32: switch to ARM mode (must be supported by the target).
    if (!hasARM())
      return Error(L, "target does not support ARM mode");

    if (isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  }

  return false;
}
9350 
9351 /// parseDirectiveReq
9352 ///  ::= name .req registername
9353 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
9354   MCAsmParser &Parser = getParser();
9355   Parser.Lex(); // Eat the '.req' token.
9356   unsigned Reg;
9357   SMLoc SRegLoc, ERegLoc;
9358   if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc,
9359             "register name expected") ||
9360       parseToken(AsmToken::EndOfStatement,
9361                  "unexpected input in .req directive."))
9362     return true;
9363 
9364   if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg)
9365     return Error(SRegLoc,
9366                  "redefinition of '" + Name + "' does not match original.");
9367 
9368   return false;
9369 }
9370 
9371 /// parseDirectiveUneq
9372 ///  ::= .unreq registername
9373 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
9374   MCAsmParser &Parser = getParser();
9375   if (Parser.getTok().isNot(AsmToken::Identifier))
9376     return Error(L, "unexpected input in .unreq directive.");
9377   RegisterReqs.erase(Parser.getTok().getIdentifier().lower());
9378   Parser.Lex(); // Eat the identifier.
9379   if (parseToken(AsmToken::EndOfStatement,
9380                  "unexpected input in '.unreq' directive"))
9381     return true;
9382   return false;
9383 }
9384 
9385 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was
9386 // before, if supported by the new target, or emit mapping symbols for the mode
9387 // switch.
9388 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) {
9389   if (WasThumb != isThumb()) {
9390     if (WasThumb && hasThumb()) {
9391       // Stay in Thumb mode
9392       SwitchMode();
9393     } else if (!WasThumb && hasARM()) {
9394       // Stay in ARM mode
9395       SwitchMode();
9396     } else {
9397       // Mode switch forced, because the new arch doesn't support the old mode.
9398       getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16
9399                                                             : MCAF_Code32);
9400       // Warn about the implcit mode switch. GAS does not switch modes here,
9401       // but instead stays in the old mode, reporting an error on any following
9402       // instructions as the mode does not exist on the target.
9403       Warning(Loc, Twine("new target does not support ") +
9404                        (WasThumb ? "thumb" : "arm") + " mode, switching to " +
9405                        (!WasThumb ? "thumb" : "arm") + " mode");
9406     }
9407   }
9408 }
9409 
9410 /// parseDirectiveArch
9411 ///  ::= .arch token
9412 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
9413   StringRef Arch = getParser().parseStringToEndOfStatement().trim();
9414   ARM::ArchKind ID = ARM::parseArch(Arch);
9415 
9416   if (ID == ARM::ArchKind::INVALID)
9417     return Error(L, "Unknown arch name");
9418 
9419   bool WasThumb = isThumb();
9420   Triple T;
9421   MCSubtargetInfo &STI = copySTI();
9422   STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str());
9423   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9424   FixModeAfterArchChange(WasThumb, L);
9425 
9426   getTargetStreamer().emitArch(ID);
9427   return false;
9428 }
9429 
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int [, "str"]
///  ::= .eabi_attribute Tag_name, int [, "str"]
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t Tag;
  SMLoc TagLoc;
  TagLoc = Parser.getTok().getLoc();
  if (Parser.getTok().is(AsmToken::Identifier)) {
    // Symbolic tag form, e.g. Tag_CPU_name.
    StringRef Name = Parser.getTok().getIdentifier();
    Tag = ARMBuildAttrs::AttrTypeFromString(Name);
    if (Tag == -1) {
      Error(TagLoc, "attribute name not recognised: " + Name);
      // NOTE(review): error reported but false ("handled") returned, unlike
      // most error paths in this file -- confirm intentional.
      return false;
    }
    Parser.Lex();
  } else {
    // Numeric tag form: must evaluate to a constant expression.
    const MCExpr *AttrExpr;

    TagLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(AttrExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr);
    if (check(!CE, TagLoc, "expected numeric constant"))
      return true;

    Tag = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  StringRef StringValue = "";
  bool IsStringValue = false;

  int64_t IntegerValue = 0;
  bool IsIntegerValue = false;

  // Decide whether this tag takes a string value, an integer value, or (for
  // Tag_compatibility only) both.
  if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name)
    IsStringValue = true;
  else if (Tag == ARMBuildAttrs::compatibility) {
    IsStringValue = true;
    IsIntegerValue = true;
  } else if (Tag < 32 || Tag % 2 == 0)
    IsIntegerValue = true;
  else if (Tag % 2 == 1)
    IsStringValue = true;
  else
    // Defensive only: the two branches above cover every remainder of
    // Tag % 2, so this arm cannot be reached.
    llvm_unreachable("invalid tag type");

  if (IsIntegerValue) {
    // Parse the integer component; it must fold to a constant.
    const MCExpr *ValueExpr;
    SMLoc ValueExprLoc = Parser.getTok().getLoc();
    if (Parser.parseExpression(ValueExpr))
      return true;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr);
    if (!CE)
      return Error(ValueExprLoc, "expected numeric constant");
    IntegerValue = CE->getValue();
  }

  if (Tag == ARMBuildAttrs::compatibility) {
    // Tag_compatibility carries "int, string"; eat the separating comma.
    if (Parser.parseToken(AsmToken::Comma, "comma expected"))
      return true;
  }

  if (IsStringValue) {
    if (Parser.getTok().isNot(AsmToken::String))
      return Error(Parser.getTok().getLoc(), "bad string constant");

    StringValue = Parser.getTok().getStringContents();
    Parser.Lex();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement,
                        "unexpected token in '.eabi_attribute' directive"))
    return true;

  // Emit via the appropriate streamer hook for the value kind(s) parsed.
  if (IsIntegerValue && IsStringValue) {
    assert(Tag == ARMBuildAttrs::compatibility);
    getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue);
  } else if (IsIntegerValue)
    getTargetStreamer().emitAttribute(Tag, IntegerValue);
  else if (IsStringValue)
    getTargetStreamer().emitTextAttribute(Tag, StringValue);
  return false;
}
9519 
9520 /// parseDirectiveCPU
9521 ///  ::= .cpu str
9522 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) {
9523   StringRef CPU = getParser().parseStringToEndOfStatement().trim();
9524   getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU);
9525 
9526   // FIXME: This is using table-gen data, but should be moved to
9527   // ARMTargetParser once that is table-gen'd.
9528   if (!getSTI().isCPUStringValid(CPU))
9529     return Error(L, "Unknown CPU name");
9530 
9531   bool WasThumb = isThumb();
9532   MCSubtargetInfo &STI = copySTI();
9533   STI.setDefaultFeatures(CPU, "");
9534   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9535   FixModeAfterArchChange(WasThumb, L);
9536 
9537   return false;
9538 }
9539 
9540 /// parseDirectiveFPU
9541 ///  ::= .fpu str
9542 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) {
9543   SMLoc FPUNameLoc = getTok().getLoc();
9544   StringRef FPU = getParser().parseStringToEndOfStatement().trim();
9545 
9546   unsigned ID = ARM::parseFPU(FPU);
9547   std::vector<StringRef> Features;
9548   if (!ARM::getFPUFeatures(ID, Features))
9549     return Error(FPUNameLoc, "Unknown FPU name");
9550 
9551   MCSubtargetInfo &STI = copySTI();
9552   for (auto Feature : Features)
9553     STI.ApplyFeatureFlag(Feature);
9554   setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
9555 
9556   getTargetStreamer().emitFPU(ID);
9557   return false;
9558 }
9559 
9560 /// parseDirectiveFnStart
9561 ///  ::= .fnstart
9562 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) {
9563   if (parseToken(AsmToken::EndOfStatement,
9564                  "unexpected token in '.fnstart' directive"))
9565     return true;
9566 
9567   if (UC.hasFnStart()) {
9568     Error(L, ".fnstart starts before the end of previous one");
9569     UC.emitFnStartLocNotes();
9570     return true;
9571   }
9572 
9573   // Reset the unwind directives parser state
9574   UC.reset();
9575 
9576   getTargetStreamer().emitFnStart();
9577 
9578   UC.recordFnStart(L);
9579   return false;
9580 }
9581 
9582 /// parseDirectiveFnEnd
9583 ///  ::= .fnend
9584 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) {
9585   if (parseToken(AsmToken::EndOfStatement,
9586                  "unexpected token in '.fnend' directive"))
9587     return true;
9588   // Check the ordering of unwind directives
9589   if (!UC.hasFnStart())
9590     return Error(L, ".fnstart must precede .fnend directive");
9591 
9592   // Reset the unwind directives parser state
9593   getTargetStreamer().emitFnEnd();
9594 
9595   UC.reset();
9596   return false;
9597 }
9598 
/// parseDirectiveCantUnwind
///  ::= .cantunwind
bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) {
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.cantunwind' directive"))
    return true;

  // Record the location first so later diagnostics can refer back to it.
  UC.recordCantUnwind(L);
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive"))
    return true;

  // .cantunwind conflicts with both .handlerdata and .personality; report
  // which one, with a note pointing at the conflicting directive.
  if (UC.hasHandlerData()) {
    Error(L, ".cantunwind can't be used with .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (UC.hasPersonality()) {
    Error(L, ".cantunwind can't be used with .personality directive");
    UC.emitPersonalityLocNotes();
    return true;
  }

  getTargetStreamer().emitCantUnwind();
  return false;
}
9625 
/// parseDirectivePersonality
///  ::= .personality name
bool ARMAsmParser::parseDirectivePersonality(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Capture this before recordPersonality below, so that a duplicate
  // .personality in the same region can still be detected.
  bool HasExistingPersonality = UC.hasPersonality();

  // Parse the name of the personality routine
  if (Parser.getTok().isNot(AsmToken::Identifier))
    return Error(L, "unexpected input in .personality directive.");
  StringRef Name(Parser.getTok().getIdentifier());
  Parser.Lex();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.personality' directive"))
    return true;

  // Record the location so later diagnostics can refer back to it.
  UC.recordPersonality(L);

  // Check the ordering of unwind directives
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .personality directive");
  if (UC.cantUnwind()) {
    Error(L, ".personality can't be used with .cantunwind directive");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personality must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  getTargetStreamer().emitPersonality(PR);
  return false;
}
9667 
9668 /// parseDirectiveHandlerData
9669 ///  ::= .handlerdata
9670 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) {
9671   if (parseToken(AsmToken::EndOfStatement,
9672                  "unexpected token in '.handlerdata' directive"))
9673     return true;
9674 
9675   UC.recordHandlerData(L);
9676   // Check the ordering of unwind directives
9677   if (!UC.hasFnStart())
9678     return Error(L, ".fnstart must precede .personality directive");
9679   if (UC.cantUnwind()) {
9680     Error(L, ".handlerdata can't be used with .cantunwind directive");
9681     UC.emitCantUnwindLocNotes();
9682     return true;
9683   }
9684 
9685   getTargetStreamer().emitHandlerData();
9686   return false;
9687 }
9688 
/// parseDirectiveSetFP
///  ::= .setfp fpreg, spreg [, offset]
bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Check the ordering of unwind directives
  if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") ||
      check(UC.hasHandlerData(), L,
            ".setfp must precede .handlerdata directive"))
    return true;

  // Parse fpreg
  SMLoc FPRegLoc = Parser.getTok().getLoc();
  int FPReg = tryParseRegister();

  if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") ||
      Parser.parseToken(AsmToken::Comma, "comma expected"))
    return true;

  // Parse spreg. It must be either SP or the frame pointer recorded by a
  // previous .setfp -- which is why UC.getFPReg() is consulted here, before
  // saveFPReg() overwrites it with the new FPReg below.
  SMLoc SPRegLoc = Parser.getTok().getLoc();
  int SPReg = tryParseRegister();
  if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") ||
      check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc,
            "register should be either $sp or the latest fp register"))
    return true;

  // Update the frame pointer register
  UC.saveFPReg(FPReg);

  // Parse offset. The optional third operand must be an immediate introduced
  // by '#' or '$'.
  int64_t Offset = 0;
  if (Parser.parseOptionalToken(AsmToken::Comma)) {
    if (Parser.getTok().isNot(AsmToken::Hash) &&
        Parser.getTok().isNot(AsmToken::Dollar))
      return Error(Parser.getTok().getLoc(), "'#' expected");
    Parser.Lex(); // skip hash token.

    const MCExpr *OffsetExpr;
    SMLoc ExLoc = Parser.getTok().getLoc();
    SMLoc EndLoc;
    if (getParser().parseExpression(OffsetExpr, EndLoc))
      return Error(ExLoc, "malformed setfp offset");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
    if (check(!CE, ExLoc, "setfp offset must be an immediate"))
      return true;
    Offset = CE->getValue();
  }

  if (Parser.parseToken(AsmToken::EndOfStatement))
    return true;

  getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg),
                                static_cast<unsigned>(SPReg), Offset);
  return false;
}
9744 
9745 /// parseDirective
9746 ///  ::= .pad offset
9747 bool ARMAsmParser::parseDirectivePad(SMLoc L) {
9748   MCAsmParser &Parser = getParser();
9749   // Check the ordering of unwind directives
9750   if (!UC.hasFnStart())
9751     return Error(L, ".fnstart must precede .pad directive");
9752   if (UC.hasHandlerData())
9753     return Error(L, ".pad must precede .handlerdata directive");
9754 
9755   // Parse the offset
9756   if (Parser.getTok().isNot(AsmToken::Hash) &&
9757       Parser.getTok().isNot(AsmToken::Dollar))
9758     return Error(Parser.getTok().getLoc(), "'#' expected");
9759   Parser.Lex(); // skip hash token.
9760 
9761   const MCExpr *OffsetExpr;
9762   SMLoc ExLoc = Parser.getTok().getLoc();
9763   SMLoc EndLoc;
9764   if (getParser().parseExpression(OffsetExpr, EndLoc))
9765     return Error(ExLoc, "malformed pad offset");
9766   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
9767   if (!CE)
9768     return Error(ExLoc, "pad offset must be an immediate");
9769 
9770   if (parseToken(AsmToken::EndOfStatement,
9771                  "unexpected token in '.pad' directive"))
9772     return true;
9773 
9774   getTargetStreamer().emitPad(CE->getValue());
9775   return false;
9776 }
9777 
9778 /// parseDirectiveRegSave
9779 ///  ::= .save  { registers }
9780 ///  ::= .vsave { registers }
9781 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) {
9782   // Check the ordering of unwind directives
9783   if (!UC.hasFnStart())
9784     return Error(L, ".fnstart must precede .save or .vsave directives");
9785   if (UC.hasHandlerData())
9786     return Error(L, ".save or .vsave must precede .handlerdata directive");
9787 
9788   // RAII object to make sure parsed operands are deleted.
9789   SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands;
9790 
9791   // Parse the register list
9792   if (parseRegisterList(Operands) ||
9793       parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9794     return true;
9795   ARMOperand &Op = (ARMOperand &)*Operands[0];
9796   if (!IsVector && !Op.isRegList())
9797     return Error(L, ".save expects GPR registers");
9798   if (IsVector && !Op.isDPRRegList())
9799     return Error(L, ".vsave expects DPR registers");
9800 
9801   getTargetStreamer().emitRegSave(Op.getRegList(), IsVector);
9802   return false;
9803 }
9804 
9805 /// parseDirectiveInst
9806 ///  ::= .inst opcode [, ...]
9807 ///  ::= .inst.n opcode [, ...]
9808 ///  ::= .inst.w opcode [, ...]
9809 bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
9810   int Width = 4;
9811 
9812   if (isThumb()) {
9813     switch (Suffix) {
9814     case 'n':
9815       Width = 2;
9816       break;
9817     case 'w':
9818       break;
9819     default:
9820       return Error(Loc, "cannot determine Thumb instruction size, "
9821                         "use inst.n/inst.w instead");
9822     }
9823   } else {
9824     if (Suffix)
9825       return Error(Loc, "width suffixes are invalid in ARM mode");
9826   }
9827 
9828   auto parseOne = [&]() -> bool {
9829     const MCExpr *Expr;
9830     if (getParser().parseExpression(Expr))
9831       return true;
9832     const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
9833     if (!Value) {
9834       return Error(Loc, "expected constant expression");
9835     }
9836 
9837     switch (Width) {
9838     case 2:
9839       if (Value->getValue() > 0xffff)
9840         return Error(Loc, "inst.n operand is too big, use inst.w instead");
9841       break;
9842     case 4:
9843       if (Value->getValue() > 0xffffffff)
9844         return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
9845                               " operand is too big");
9846       break;
9847     default:
9848       llvm_unreachable("only supported widths are 2 and 4");
9849     }
9850 
9851     getTargetStreamer().emitInst(Value->getValue(), Suffix);
9852     return false;
9853   };
9854 
9855   if (parseOptionalToken(AsmToken::EndOfStatement))
9856     return Error(Loc, "expected expression following directive");
9857   if (parseMany(parseOne))
9858     return true;
9859   return false;
9860 }
9861 
9862 /// parseDirectiveLtorg
9863 ///  ::= .ltorg | .pool
9864 bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
9865   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9866     return true;
9867   getTargetStreamer().emitCurrentConstantPool();
9868   return false;
9869 }
9870 
9871 bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
9872   const MCSection *Section = getStreamer().getCurrentSectionOnly();
9873 
9874   if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
9875     return true;
9876 
9877   if (!Section) {
9878     getStreamer().InitSections(false);
9879     Section = getStreamer().getCurrentSectionOnly();
9880   }
9881 
9882   assert(Section && "must have section to emit alignment");
9883   if (Section->UseCodeAlign())
9884     getStreamer().EmitCodeAlignment(2);
9885   else
9886     getStreamer().EmitValueToAlignment(2);
9887 
9888   return false;
9889 }
9890 
/// parseDirectivePersonalityIndex
///   ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  // Captured before recording this directive so the duplicate check below
  // sees only personality directives that preceded this one.
  bool HasExistingPersonality = UC.hasPersonality();

  // Parse the index expression and end-of-statement up front; all semantic
  // checks are performed afterwards on the parsed value.
  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) ||
      parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.personalityindex' directive")) {
    return true;
  }

  // Record this directive's location even if a check below fails, so later
  // diagnostics can refer back to it.
  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives; the conflicting-directive
  // cases also emit notes pointing at the earlier directive's location.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  // The index must be a constant in [0, NUM_PERSONALITY_INDEX).
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}
9936 
/// parseDirectiveUnwindRaw
///   ::= .unwind_raw offset, opcode [, opcode...]
bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) {
  MCAsmParser &Parser = getParser();
  int64_t StackOffset;
  const MCExpr *OffsetExpr;
  SMLoc OffsetLoc = getLexer().getLoc();

  // .unwind_raw is only valid inside a function's unwind region.
  if (!UC.hasFnStart())
    return Error(L, ".fnstart must precede .unwind_raw directives");
  if (getParser().parseExpression(OffsetExpr))
    return Error(OffsetLoc, "expected expression");

  // The stack offset must fold to a constant.
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
  if (!CE)
    return Error(OffsetLoc, "offset must be a constant");

  StackOffset = CE->getValue();

  if (Parser.parseToken(AsmToken::Comma, "expected comma"))
    return true;

  // Raw unwind opcodes are single bytes, collected in source order.
  SmallVector<uint8_t, 16> Opcodes;

  // Parse one opcode: a constant expression whose value fits in a byte.
  // The EndOfStatement check catches a trailing comma before attempting to
  // parse an expression there.
  auto parseOne = [&]() -> bool {
    const MCExpr *OE;
    SMLoc OpcodeLoc = getLexer().getLoc();
    if (check(getLexer().is(AsmToken::EndOfStatement) ||
                  Parser.parseExpression(OE),
              OpcodeLoc, "expected opcode expression"))
      return true;
    const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE);
    if (!OC)
      return Error(OpcodeLoc, "opcode value must be a constant");
    const int64_t Opcode = OC->getValue();
    // Each opcode must fit in a single byte.
    if (Opcode & ~0xff)
      return Error(OpcodeLoc, "invalid opcode");
    Opcodes.push_back(uint8_t(Opcode));
    return false;
  };

  // Must have at least 1 element
  SMLoc OpcodeLoc = getLexer().getLoc();
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(OpcodeLoc, "expected opcode expression");
  if (parseMany(parseOne))
    return true;

  getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes);
  return false;
}
9988 
9989 /// parseDirectiveTLSDescSeq
9990 ///   ::= .tlsdescseq tls-variable
9991 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) {
9992   MCAsmParser &Parser = getParser();
9993 
9994   if (getLexer().isNot(AsmToken::Identifier))
9995     return TokError("expected variable after '.tlsdescseq' directive");
9996 
9997   const MCSymbolRefExpr *SRE =
9998     MCSymbolRefExpr::create(Parser.getTok().getIdentifier(),
9999                             MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext());
10000   Lex();
10001 
10002   if (parseToken(AsmToken::EndOfStatement,
10003                  "unexpected token in '.tlsdescseq' directive"))
10004     return true;
10005 
10006   getTargetStreamer().AnnotateTLSDescriptorSequence(SRE);
10007   return false;
10008 }
10009 
10010 /// parseDirectiveMovSP
10011 ///  ::= .movsp reg [, #offset]
10012 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) {
10013   MCAsmParser &Parser = getParser();
10014   if (!UC.hasFnStart())
10015     return Error(L, ".fnstart must precede .movsp directives");
10016   if (UC.getFPReg() != ARM::SP)
10017     return Error(L, "unexpected .movsp directive");
10018 
10019   SMLoc SPRegLoc = Parser.getTok().getLoc();
10020   int SPReg = tryParseRegister();
10021   if (SPReg == -1)
10022     return Error(SPRegLoc, "register expected");
10023   if (SPReg == ARM::SP || SPReg == ARM::PC)
10024     return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive");
10025 
10026   int64_t Offset = 0;
10027   if (Parser.parseOptionalToken(AsmToken::Comma)) {
10028     if (Parser.parseToken(AsmToken::Hash, "expected #constant"))
10029       return true;
10030 
10031     const MCExpr *OffsetExpr;
10032     SMLoc OffsetLoc = Parser.getTok().getLoc();
10033 
10034     if (Parser.parseExpression(OffsetExpr))
10035       return Error(OffsetLoc, "malformed offset expression");
10036 
10037     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr);
10038     if (!CE)
10039       return Error(OffsetLoc, "offset must be an immediate constant");
10040 
10041     Offset = CE->getValue();
10042   }
10043 
10044   if (parseToken(AsmToken::EndOfStatement,
10045                  "unexpected token in '.movsp' directive"))
10046     return true;
10047 
10048   getTargetStreamer().emitMovSP(SPReg, Offset);
10049   UC.saveFPReg(SPReg);
10050 
10051   return false;
10052 }
10053 
10054 /// parseDirectiveObjectArch
10055 ///   ::= .object_arch name
10056 bool ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
10057   MCAsmParser &Parser = getParser();
10058   if (getLexer().isNot(AsmToken::Identifier))
10059     return Error(getLexer().getLoc(), "unexpected token");
10060 
10061   StringRef Arch = Parser.getTok().getString();
10062   SMLoc ArchLoc = Parser.getTok().getLoc();
10063   Lex();
10064 
10065   ARM::ArchKind ID = ARM::parseArch(Arch);
10066 
10067   if (ID == ARM::ArchKind::INVALID)
10068     return Error(ArchLoc, "unknown architecture '" + Arch + "'");
10069   if (parseToken(AsmToken::EndOfStatement))
10070     return true;
10071 
10072   getTargetStreamer().emitObjectArch(ID);
10073   return false;
10074 }
10075 
10076 /// parseDirectiveAlign
10077 ///   ::= .align
10078 bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
10079   // NOTE: if this is not the end of the statement, fall back to the target
10080   // agnostic handling for this directive which will correctly handle this.
10081   if (parseOptionalToken(AsmToken::EndOfStatement)) {
10082     // '.align' is target specifically handled to mean 2**2 byte alignment.
10083     const MCSection *Section = getStreamer().getCurrentSectionOnly();
10084     assert(Section && "must have section to emit alignment");
10085     if (Section->UseCodeAlign())
10086       getStreamer().EmitCodeAlignment(4, 0);
10087     else
10088       getStreamer().EmitValueToAlignment(4, 0, 1, 0);
10089     return false;
10090   }
10091   return true;
10092 }
10093 
10094 /// parseDirectiveThumbSet
10095 ///  ::= .thumb_set name, value
10096 bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
10097   MCAsmParser &Parser = getParser();
10098 
10099   StringRef Name;
10100   if (check(Parser.parseIdentifier(Name),
10101             "expected identifier after '.thumb_set'") ||
10102       parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
10103     return true;
10104 
10105   MCSymbol *Sym;
10106   const MCExpr *Value;
10107   if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
10108                                                Parser, Sym, Value))
10109     return true;
10110 
10111   getTargetStreamer().emitThumbSet(Sym, Value);
10112   return false;
10113 }
10114 
10115 /// Force static initialization.
10116 extern "C" void LLVMInitializeARMAsmParser() {
10117   RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
10118   RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
10119   RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
10120   RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
10121 }
10122 
10123 #define GET_REGISTER_MATCHER
10124 #define GET_SUBTARGET_FEATURE_NAME
10125 #define GET_MATCHER_IMPLEMENTATION
10126 #include "ARMGenAsmMatcher.inc"
10127 
10128 // Some diagnostics need to vary with subtarget features, so they are handled
10129 // here. For example, the DPR class has either 16 or 32 registers, depending
10130 // on the FPU available.
10131 const char *
10132 ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
10133   switch (MatchError) {
10134   // rGPR contains sp starting with ARMv8.
10135   case Match_rGPR:
10136     return hasV8Ops() ? "operand must be a register in range [r0, r14]"
10137                       : "operand must be a register in range [r0, r12] or r14";
10138   // DPR contains 16 registers for some FPUs, and 32 for others.
10139   case Match_DPR:
10140     return hasD16() ? "operand must be a register in range [d0, d15]"
10141                     : "operand must be a register in range [d0, d31]";
10142 
10143   // For all other diags, use the static string from tablegen.
10144   default:
10145     return getMatchKindDiag(MatchError);
10146   }
10147 }
10148 
// Process the list of near-misses, throwing away ones we don't want to report
// to the user, and converting the rest to a source location and string that
// should be reported.
void
ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn,
                               SmallVectorImpl<NearMissMessage> &NearMissesOut,
                               SMLoc IDLoc, OperandVector &Operands) {
  // TODO: If operand didn't match, sub in a dummy one and run target
  // predicate, so that we can avoid reporting near-misses that are invalid?
  // TODO: Many operand types dont have SuperClasses set, so we report
  // redundant ones.
  // TODO: Some operands are superclasses of registers (e.g.
  // MCK_RegShiftedImm), we don't have any way to represent that currently.
  // TODO: This is not all ARM-specific, can some of it be factored out?

  // Record some information about near-misses that we have already seen, so
  // that we can avoid reporting redundant ones. For example, if there are
  // variants of an instruction that take 8- and 16-bit immediates, we want
  // to only report the widest one.
  // Maps operand index -> match classes already reported for that operand.
  std::multimap<unsigned, unsigned> OperandMissesSeen;
  SmallSet<uint64_t, 4> FeatureMissesSeen;

  // Process the near-misses in reverse order, so that we see more general ones
  // first, and so can avoid emitting more specific ones.
  for (NearMissInfo &I : reverse(NearMissesIn)) {
    switch (I.getKind()) {
    case NearMissInfo::NearMissOperand: {
      // Point the diagnostic at the offending operand's source location.
      SMLoc OperandLoc =
          ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc();
      const char *OperandDiag =
          getCustomOperandDiag((ARMMatchResultTy)I.getOperandError());

      // If we have already emitted a message for a superclass, don't also report
      // the sub-class. We consider all operand classes that we don't have a
      // specialised diagnostic for to be equal for the propose of this check,
      // so that we don't report the generic error multiple times on the same
      // operand.
      unsigned DupCheckMatchClass = OperandDiag ? I.getOperandClass() : ~0U;
      auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex());
      if (std::any_of(PrevReports.first, PrevReports.second,
                      [DupCheckMatchClass](
                          const std::pair<unsigned, unsigned> Pair) {
            // ~0U is the "generic diagnostic" sentinel and only matches
            // itself; real classes are deduplicated via subclass checks.
            if (DupCheckMatchClass == ~0U || Pair.second == ~0U)
              return Pair.second == DupCheckMatchClass;
            else
              return isSubclass((MatchClassKind)DupCheckMatchClass,
                                (MatchClassKind)Pair.second);
          }))
        break;
      OperandMissesSeen.insert(
          std::make_pair(I.getOperandIndex(), DupCheckMatchClass));

      // Build the message for this operand near-miss.
      NearMissMessage Message;
      Message.Loc = OperandLoc;
      raw_svector_ostream OS(Message.Message);
      if (OperandDiag) {
        OS << OperandDiag;
      } else if (I.getOperandClass() == InvalidMatchClass) {
        OS << "too many operands for instruction";
      } else {
        OS << "invalid operand for instruction";
        // With -debug-diags, append internal matcher details for developers.
        if (DevDiags) {
          OS << " class" << I.getOperandClass() << ", error "
             << I.getOperandError() << ", opcode "
             << MII.getName(I.getOpcode());
        }
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissFeature: {
      uint64_t MissingFeatures = I.getFeatures();
      // Don't report the same set of features twice.
      if (FeatureMissesSeen.count(MissingFeatures))
        break;
      FeatureMissesSeen.insert(MissingFeatures);

      // Special case: don't report a feature set which includes arm-mode for
      // targets that don't have ARM mode.
      if ((MissingFeatures & Feature_IsARM) && !hasARM())
        break;
      // Don't report any near-misses that both require switching instruction
      // set, and adding other subtarget features.
      if (isThumb() && (MissingFeatures & Feature_IsARM) &&
          (MissingFeatures & ~Feature_IsARM))
        break;
      if (!isThumb() && (MissingFeatures & Feature_IsThumb) &&
          (MissingFeatures & ~Feature_IsThumb))
        break;
      if (!isThumb() && (MissingFeatures & Feature_IsThumb2) &&
          (MissingFeatures & ~(Feature_IsThumb2 | Feature_IsThumb)))
        break;

      NearMissMessage Message;
      Message.Loc = IDLoc;
      raw_svector_ostream OS(Message.Message);

      // Print the name of every missing feature bit.
      OS << "instruction requires:";
      uint64_t Mask = 1;
      for (unsigned MaskPos = 0; MaskPos < (sizeof(MissingFeatures) * 8 - 1);
           ++MaskPos) {
        if (MissingFeatures & Mask) {
          OS << " " << getSubtargetFeatureName(MissingFeatures & Mask);
        }
        Mask <<= 1;
      }
      NearMissesOut.emplace_back(Message);

      break;
    }
    case NearMissInfo::NearMissPredicate: {
      // Map the target-predicate failure code onto a fixed message.
      NearMissMessage Message;
      Message.Loc = IDLoc;
      switch (I.getPredicateError()) {
      case Match_RequiresNotITBlock:
        Message.Message = "flag setting instruction only valid outside IT block";
        break;
      case Match_RequiresITBlock:
        Message.Message = "instruction only valid inside IT block";
        break;
      case Match_RequiresV6:
        Message.Message = "instruction variant requires ARMv6 or later";
        break;
      case Match_RequiresThumb2:
        Message.Message = "instruction variant requires Thumb2";
        break;
      case Match_RequiresV8:
        Message.Message = "instruction variant requires ARMv8 or later";
        break;
      case Match_RequiresFlagSetting:
        Message.Message = "no flag-preserving variant of this instruction available";
        break;
      case Match_InvalidOperand:
        Message.Message = "invalid operand for instruction";
        break;
      default:
        llvm_unreachable("Unhandled target predicate error");
        break;
      }
      NearMissesOut.emplace_back(Message);
      break;
    }
    case NearMissInfo::NearMissTooFewOperands: {
      // Report at the position just past the last operand supplied.
      SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
      NearMissesOut.emplace_back(
          NearMissMessage{ EndLoc, StringRef("too few operands for instruction") });
      break;
    }
    case NearMissInfo::NoNearMiss:
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}
10304 
10305 void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
10306                                     SMLoc IDLoc, OperandVector &Operands) {
10307   SmallVector<NearMissMessage, 4> Messages;
10308   FilterNearMisses(NearMisses, Messages, IDLoc, Operands);
10309 
10310   if (Messages.size() == 0) {
10311     // No near-misses were found, so the best we can do is "invalid
10312     // instruction".
10313     Error(IDLoc, "invalid instruction");
10314   } else if (Messages.size() == 1) {
10315     // One near miss was found, report it as the sole error.
10316     Error(Messages[0].Loc, Messages[0].Message);
10317   } else {
10318     // More than one near miss, so report a generic "invalid instruction"
10319     // error, followed by notes for each of the near-misses.
10320     Error(IDLoc, "invalid instruction, any one of the following would fix this:");
10321     for (auto &M : Messages) {
10322       Note(M.Loc, M.Message);
10323     }
10324   }
10325 }
10326 
// FIXME: This structure should be moved inside ARMTargetParser
// when we start to table-generate them, and we can use the ARM
// flags below, that were generated by table-gen.
static const struct {
  const unsigned Kind;          // ARM::AEK_* extension id (may be OR'd flags).
  const uint64_t ArchCheck;     // Feature_* bits the base arch must provide.
  const FeatureBitset Features; // Subtarget features the extension toggles.
} Extensions[] = {
  { ARM::AEK_CRC, Feature_HasV8, {ARM::FeatureCRC} },
  { ARM::AEK_CRYPTO,  Feature_HasV8,
    {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
  { ARM::AEK_FP, Feature_HasV8, {ARM::FeatureFPARMv8} },
  { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM), Feature_HasV7 | Feature_IsNotMClass,
    {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
  { ARM::AEK_MP, Feature_HasV7 | Feature_IsNotMClass, {ARM::FeatureMP} },
  { ARM::AEK_SIMD, Feature_HasV8, {ARM::FeatureNEON, ARM::FeatureFPARMv8} },
  { ARM::AEK_SEC, Feature_HasV6K, {ARM::FeatureTrustZone} },
  // FIXME: Only available in A-class, isel not predicated
  { ARM::AEK_VIRT, Feature_HasV7, {ARM::FeatureVirtualization} },
  { ARM::AEK_FP16, Feature_HasV8_2a, {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
  { ARM::AEK_RAS, Feature_HasV8, {ARM::FeatureRAS} },
  // FIXME: Unsupported extensions.
  // An empty Features set marks an extension that is recognized but not
  // implemented; .arch_extension reports these with a dedicated error.
  { ARM::AEK_OS, Feature_None, {} },
  { ARM::AEK_IWMMXT, Feature_None, {} },
  { ARM::AEK_IWMMXT2, Feature_None, {} },
  { ARM::AEK_MAVERICK, Feature_None, {} },
  { ARM::AEK_XSCALE, Feature_None, {} },
};
10355 
/// parseDirectiveArchExtension
///   ::= .arch_extension [no]feature
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
  MCAsmParser &Parser = getParser();

  if (getLexer().isNot(AsmToken::Identifier))
    return Error(getLexer().getLoc(), "expected architecture extension name");

  StringRef Name = Parser.getTok().getString();
  SMLoc ExtLoc = Parser.getTok().getLoc();
  Lex();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.arch_extension' directive"))
    return true;

  // A leading "no" (case-insensitive) disables the extension instead of
  // enabling it, e.g. ".arch_extension nocrc".
  bool EnableFeature = true;
  if (Name.startswith_lower("no")) {
    EnableFeature = false;
    Name = Name.substr(2);
  }
  unsigned FeatureKind = ARM::parseArchExt(Name);
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(ExtLoc, "unknown architectural extension: " + Name);

  // Look the extension up in the table and toggle its subtarget features.
  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    // Entries with an empty feature set are recognized but not implemented.
    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

    // The current base architecture must satisfy the extension's
    // prerequisites.
    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(ExtLoc, "architectural extension '" + Name +
                               "' is not "
                               "allowed for the current base architecture");

    MCSubtargetInfo &STI = copySTI();
    // Toggle only the bits that actually change state: when enabling, the
    // extension features not yet set; when disabling, those currently set.
    FeatureBitset ToggleFeatures = EnableFeature
      ? (~STI.getFeatureBits() & Extension.Features)
      : ( STI.getFeatureBits() & Extension.Features);

    uint64_t Features =
        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
    setAvailableFeatures(Features);
    return false;
  }

  // FeatureKind was valid but has no table entry.
  return Error(ExtLoc, "unknown architectural extension: " + Name);
}
10406 
// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                  unsigned Kind) {
  ARMOperand &Op = static_cast<ARMOperand &>(AsmOp);
  // If the kind is a token for a literal immediate, check if our asm
  // operand matches. This is for InstAliases which have a fixed-value
  // immediate in the syntax.
  switch (Kind) {
  default: break;
  case MCK__35_0:
    // Tablegen-mangled token class for the literal "#0" (35 is ASCII '#').
    if (Op.isImm())
      if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm()))
        if (CE->getValue() == 0)
          return Match_Success;
    break;
  case MCK_ModImm:
    if (Op.isImm()) {
      const MCExpr *SOExpr = Op.getImm();
      int64_t Value;
      // Not foldable to a constant here (e.g. involves symbols) — accept it;
      // presumably the value is validated once it can be evaluated.
      if (!SOExpr->evaluateAsAbsolute(Value))
        return Match_Success;
      assert((Value >= std::numeric_limits<int32_t>::min() &&
              Value <= std::numeric_limits<uint32_t>::max()) &&
             "expression value must be representable in 32 bits");
    }
    break;
  case MCK_rGPR:
    // rGPR additionally admits sp on ARMv8 and later; otherwise return the
    // class-specific code so a tailored diagnostic is produced.
    if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP)
      return Match_Success;
    return Match_rGPR;
  case MCK_GPRPair:
    // Accept any single GPR here; pair formation is handled elsewhere.
    // NOTE(review): the check is against the GPR class, not a pair class —
    // confirm this is the intended relaxation.
    if (Op.isReg() &&
        MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg()))
      return Match_Success;
    break;
  }
  return Match_InvalidOperand;
}
10446