1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
10 #include "MCTargetDesc/ARMBaseInfo.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMMCExpr.h"
13 #include "llvm/MC/MCParser/MCAsmLexer.h"
14 #include "llvm/MC/MCParser/MCAsmParser.h"
15 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16 #include "llvm/MC/MCAsmInfo.h"
17 #include "llvm/MC/MCContext.h"
18 #include "llvm/MC/MCStreamer.h"
19 #include "llvm/MC/MCExpr.h"
20 #include "llvm/MC/MCInst.h"
21 #include "llvm/MC/MCInstrDesc.h"
22 #include "llvm/MC/MCRegisterInfo.h"
23 #include "llvm/MC/MCSubtargetInfo.h"
24 #include "llvm/MC/MCTargetAsmParser.h"
25 #include "llvm/Support/MathExtras.h"
26 #include "llvm/Support/SourceMgr.h"
27 #include "llvm/Support/TargetRegistry.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include "llvm/ADT/BitVector.h"
30 #include "llvm/ADT/OwningPtr.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/ADT/SmallVector.h"
33 #include "llvm/ADT/StringSwitch.h"
34 #include "llvm/ADT/Twine.h"
35 
36 using namespace llvm;
37 
38 namespace {
39 
40 class ARMOperand;
41 
42 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43 
44 class ARMAsmParser : public MCTargetAsmParser {
45   MCSubtargetInfo &STI;
46   MCAsmParser &Parser;
47   const MCRegisterInfo *MRI;
48 
  // Map of register aliases registered via the .req directive.
50   StringMap<unsigned> RegisterReqs;
51 
52   struct {
53     ARMCC::CondCodes Cond;    // Condition for IT block.
54     unsigned Mask:4;          // Condition mask for instructions.
55                               // Starting at first 1 (from lsb).
56                               //   '1'  condition as indicated in IT.
57                               //   '0'  inverse of condition (else).
58                               // Count of instructions in IT block is
59                               // 4 - trailingzeroes(mask)
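                              // e.g. a mask of 0b0100 has two trailing
                              // zeroes, so the IT block contains
                              // 4 - 2 = 2 instructions.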
60 
    bool FirstCond;           // Explicit flag for when we're parsing the
                              // first instruction in the IT block. It's
                              // implied in the mask, so it needs special
                              // handling.
65 
66     unsigned CurPosition;     // Current position in parsing of IT
67                               // block. In range [0,3]. Initialized
68                               // according to count of instructions in block.
69                               // ~0U if no active IT block.
70   } ITState;
71   bool inITBlock() { return ITState.CurPosition != ~0U;}
72   void forwardITPosition() {
73     if (!inITBlock()) return;
74     // Move to the next instruction in the IT block, if there is one. If not,
75     // mark the block as done.
76     unsigned TZ = CountTrailingZeros_32(ITState.Mask);
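    // Note: 5 - TZ is (4 - TZ) + 1, i.e. one more than the number of
    // instructions in the block (see the ITState.Mask comment above).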
77     if (++ITState.CurPosition == 5 - TZ)
78       ITState.CurPosition = ~0U; // Done with the IT block after this.
79   }
80 
81 
82   MCAsmParser &getParser() const { return Parser; }
83   MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84 
85   bool Warning(SMLoc L, const Twine &Msg,
86                ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
87     return Parser.Warning(L, Msg, Ranges);
88   }
89   bool Error(SMLoc L, const Twine &Msg,
90              ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
91     return Parser.Error(L, Msg, Ranges);
92   }
93 
94   int tryParseRegister();
95   bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
96   int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
97   bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
98   bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
99   bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
100   bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
101   bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
102                               unsigned &ShiftAmount);
103   bool parseDirectiveWord(unsigned Size, SMLoc L);
104   bool parseDirectiveThumb(SMLoc L);
105   bool parseDirectiveARM(SMLoc L);
106   bool parseDirectiveThumbFunc(SMLoc L);
107   bool parseDirectiveCode(SMLoc L);
108   bool parseDirectiveSyntax(SMLoc L);
109   bool parseDirectiveReq(StringRef Name, SMLoc L);
110   bool parseDirectiveUnreq(SMLoc L);
111   bool parseDirectiveArch(SMLoc L);
112   bool parseDirectiveEabiAttr(SMLoc L);
113 
114   StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
115                           bool &CarrySetting, unsigned &ProcessorIMod,
116                           StringRef &ITMask);
117   void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
118                              bool &CanAcceptPredicationCode);
119 
120   bool isThumb() const {
121     // FIXME: Can tablegen auto-generate this?
122     return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
123   }
124   bool isThumbOne() const {
125     return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
126   }
127   bool isThumbTwo() const {
128     return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
129   }
130   bool hasV6Ops() const {
131     return STI.getFeatureBits() & ARM::HasV6Ops;
132   }
133   bool hasV7Ops() const {
134     return STI.getFeatureBits() & ARM::HasV7Ops;
135   }
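  // Toggle between ARM and Thumb instruction-set modes and recompute the
  // available feature bits accordingly.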
136   void SwitchMode() {
137     unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
138     setAvailableFeatures(FB);
139   }
140   bool isMClass() const {
141     return STI.getFeatureBits() & ARM::FeatureMClass;
142   }
143 
144   /// @name Auto-generated Match Functions
145   /// {
146 
147 #define GET_ASSEMBLER_HEADER
148 #include "ARMGenAsmMatcher.inc"
149 
150   /// }
151 
152   OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
153   OperandMatchResultTy parseCoprocNumOperand(
154     SmallVectorImpl<MCParsedAsmOperand*>&);
155   OperandMatchResultTy parseCoprocRegOperand(
156     SmallVectorImpl<MCParsedAsmOperand*>&);
157   OperandMatchResultTy parseCoprocOptionOperand(
158     SmallVectorImpl<MCParsedAsmOperand*>&);
159   OperandMatchResultTy parseMemBarrierOptOperand(
160     SmallVectorImpl<MCParsedAsmOperand*>&);
161   OperandMatchResultTy parseProcIFlagsOperand(
162     SmallVectorImpl<MCParsedAsmOperand*>&);
163   OperandMatchResultTy parseMSRMaskOperand(
164     SmallVectorImpl<MCParsedAsmOperand*>&);
165   OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
166                                    StringRef Op, int Low, int High);
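  // PKH instructions take either an 'lsl' amount in [0, 31] (PKHBT) or an
  // 'asr' amount in [1, 32] (PKHTB).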
167   OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
168     return parsePKHImm(O, "lsl", 0, 31);
169   }
170   OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
171     return parsePKHImm(O, "asr", 1, 32);
172   }
173   OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174   OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
175   OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
176   OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
177   OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
178   OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
179   OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
180   OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
181   OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
182 
183   // Asm Match Converter Methods
184   bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
185                     const SmallVectorImpl<MCParsedAsmOperand*> &);
186   bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
187                     const SmallVectorImpl<MCParsedAsmOperand*> &);
188   bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
189                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
190   bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
191                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
192   bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
194   bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
195                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
196   bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
197                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
198   bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
199                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
200   bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
201                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
202   bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
203                              const SmallVectorImpl<MCParsedAsmOperand*> &);
204   bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
205                              const SmallVectorImpl<MCParsedAsmOperand*> &);
206   bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
207                              const SmallVectorImpl<MCParsedAsmOperand*> &);
208   bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
209                              const SmallVectorImpl<MCParsedAsmOperand*> &);
210   bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
211                   const SmallVectorImpl<MCParsedAsmOperand*> &);
212   bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
213                   const SmallVectorImpl<MCParsedAsmOperand*> &);
214   bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
215                                   const SmallVectorImpl<MCParsedAsmOperand*> &);
216   bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
217                         const SmallVectorImpl<MCParsedAsmOperand*> &);
218   bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
219                      const SmallVectorImpl<MCParsedAsmOperand*> &);
220   bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
221                         const SmallVectorImpl<MCParsedAsmOperand*> &);
222   bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
223                      const SmallVectorImpl<MCParsedAsmOperand*> &);
224   bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
225                         const SmallVectorImpl<MCParsedAsmOperand*> &);
226 
227   bool validateInstruction(MCInst &Inst,
228                            const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
229   bool processInstruction(MCInst &Inst,
230                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
231   bool shouldOmitCCOutOperand(StringRef Mnemonic,
232                               SmallVectorImpl<MCParsedAsmOperand*> &Operands);
233 
234 public:
235   enum ARMMatchResultTy {
236     Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
237     Match_RequiresNotITBlock,
238     Match_RequiresV6,
239     Match_RequiresThumb2
240   };
241 
242   ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
243     : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
244     MCAsmParserExtension::Initialize(_Parser);
245 
246     // Cache the MCRegisterInfo.
247     MRI = &getContext().getRegisterInfo();
248 
249     // Initialize the set of available features.
250     setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
251 
252     // Not in an ITBlock to start with.
253     ITState.CurPosition = ~0U;
254   }
255 
256   // Implementation of the MCTargetAsmParser interface:
257   bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
258   bool ParseInstruction(StringRef Name, SMLoc NameLoc,
259                         SmallVectorImpl<MCParsedAsmOperand*> &Operands);
260   bool ParseDirective(AsmToken DirectiveID);
261 
262   unsigned checkTargetMatchPredicate(MCInst &Inst);
263 
264   bool MatchAndEmitInstruction(SMLoc IDLoc,
265                                SmallVectorImpl<MCParsedAsmOperand*> &Operands,
266                                MCStreamer &Out);
267 };
268 } // end anonymous namespace
269 
270 namespace {
271 
272 /// ARMOperand - Instances of this class represent a parsed ARM machine
273 /// instruction.
274 class ARMOperand : public MCParsedAsmOperand {
275   enum KindTy {
276     k_CondCode,
277     k_CCOut,
278     k_ITCondMask,
279     k_CoprocNum,
280     k_CoprocReg,
281     k_CoprocOption,
282     k_Immediate,
283     k_MemBarrierOpt,
284     k_Memory,
285     k_PostIndexRegister,
286     k_MSRMask,
287     k_ProcIFlags,
288     k_VectorIndex,
289     k_Register,
290     k_RegisterList,
291     k_DPRRegisterList,
292     k_SPRRegisterList,
293     k_VectorList,
294     k_VectorListAllLanes,
295     k_VectorListIndexed,
296     k_ShiftedRegister,
297     k_ShiftedImmediate,
298     k_ShifterImmediate,
299     k_RotateImmediate,
300     k_BitfieldDescriptor,
301     k_Token
302   } Kind;
303 
304   SMLoc StartLoc, EndLoc;
305   SmallVector<unsigned, 8> Registers;
306 
307   union {
308     struct {
309       ARMCC::CondCodes Val;
310     } CC;
311 
312     struct {
313       unsigned Val;
314     } Cop;
315 
316     struct {
317       unsigned Val;
318     } CoprocOption;
319 
320     struct {
321       unsigned Mask:4;
322     } ITMask;
323 
324     struct {
325       ARM_MB::MemBOpt Val;
326     } MBOpt;
327 
328     struct {
329       ARM_PROC::IFlags Val;
330     } IFlags;
331 
332     struct {
333       unsigned Val;
334     } MMask;
335 
336     struct {
337       const char *Data;
338       unsigned Length;
339     } Tok;
340 
341     struct {
342       unsigned RegNum;
343     } Reg;
344 
345     // A vector register list is a sequential list of 1 to 4 registers.
346     struct {
347       unsigned RegNum;
348       unsigned Count;
349       unsigned LaneIndex;
350       bool isDoubleSpaced;
351     } VectorList;
352 
353     struct {
354       unsigned Val;
355     } VectorIndex;
356 
357     struct {
358       const MCExpr *Val;
359     } Imm;
360 
361     /// Combined record for all forms of ARM address expressions.
362     struct {
363       unsigned BaseRegNum;
      // Offset is in OffsetRegNum or OffsetImm. If both are zero, no offset
      // was specified.
366       const MCConstantExpr *OffsetImm;  // Offset immediate value
367       unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
368       ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
369       unsigned ShiftImm;        // shift for OffsetReg.
370       unsigned Alignment;       // 0 = no alignment specified
371                                 // n = alignment in bytes (2, 4, 8, 16, or 32)
372       unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
373     } Memory;
374 
375     struct {
376       unsigned RegNum;
377       bool isAdd;
378       ARM_AM::ShiftOpc ShiftTy;
379       unsigned ShiftImm;
380     } PostIdxReg;
381 
382     struct {
383       bool isASR;
384       unsigned Imm;
385     } ShifterImm;
386     struct {
387       ARM_AM::ShiftOpc ShiftTy;
388       unsigned SrcReg;
389       unsigned ShiftReg;
390       unsigned ShiftImm;
391     } RegShiftedReg;
392     struct {
393       ARM_AM::ShiftOpc ShiftTy;
394       unsigned SrcReg;
395       unsigned ShiftImm;
396     } RegShiftedImm;
397     struct {
398       unsigned Imm;
399     } RotImm;
400     struct {
401       unsigned LSB;
402       unsigned Width;
403     } Bitfield;
404   };
405 
406   ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
407 public:
408   ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
409     Kind = o.Kind;
410     StartLoc = o.StartLoc;
411     EndLoc = o.EndLoc;
412     switch (Kind) {
413     case k_CondCode:
414       CC = o.CC;
415       break;
416     case k_ITCondMask:
417       ITMask = o.ITMask;
418       break;
419     case k_Token:
420       Tok = o.Tok;
421       break;
422     case k_CCOut:
423     case k_Register:
424       Reg = o.Reg;
425       break;
426     case k_RegisterList:
427     case k_DPRRegisterList:
428     case k_SPRRegisterList:
429       Registers = o.Registers;
430       break;
431     case k_VectorList:
432     case k_VectorListAllLanes:
433     case k_VectorListIndexed:
434       VectorList = o.VectorList;
435       break;
436     case k_CoprocNum:
437     case k_CoprocReg:
438       Cop = o.Cop;
439       break;
440     case k_CoprocOption:
441       CoprocOption = o.CoprocOption;
442       break;
443     case k_Immediate:
444       Imm = o.Imm;
445       break;
446     case k_MemBarrierOpt:
447       MBOpt = o.MBOpt;
448       break;
449     case k_Memory:
450       Memory = o.Memory;
451       break;
452     case k_PostIndexRegister:
453       PostIdxReg = o.PostIdxReg;
454       break;
455     case k_MSRMask:
456       MMask = o.MMask;
457       break;
458     case k_ProcIFlags:
459       IFlags = o.IFlags;
460       break;
461     case k_ShifterImmediate:
462       ShifterImm = o.ShifterImm;
463       break;
464     case k_ShiftedRegister:
465       RegShiftedReg = o.RegShiftedReg;
466       break;
467     case k_ShiftedImmediate:
468       RegShiftedImm = o.RegShiftedImm;
469       break;
470     case k_RotateImmediate:
471       RotImm = o.RotImm;
472       break;
473     case k_BitfieldDescriptor:
474       Bitfield = o.Bitfield;
475       break;
476     case k_VectorIndex:
477       VectorIndex = o.VectorIndex;
478       break;
479     }
480   }
481 
482   /// getStartLoc - Get the location of the first token of this operand.
483   SMLoc getStartLoc() const { return StartLoc; }
484   /// getEndLoc - Get the location of the last token of this operand.
485   SMLoc getEndLoc() const { return EndLoc; }
486 
487   SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
488 
489   ARMCC::CondCodes getCondCode() const {
490     assert(Kind == k_CondCode && "Invalid access!");
491     return CC.Val;
492   }
493 
494   unsigned getCoproc() const {
495     assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
496     return Cop.Val;
497   }
498 
499   StringRef getToken() const {
500     assert(Kind == k_Token && "Invalid access!");
501     return StringRef(Tok.Data, Tok.Length);
502   }
503 
504   unsigned getReg() const {
505     assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
506     return Reg.RegNum;
507   }
508 
509   const SmallVectorImpl<unsigned> &getRegList() const {
510     assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
511             Kind == k_SPRRegisterList) && "Invalid access!");
512     return Registers;
513   }
514 
515   const MCExpr *getImm() const {
516     assert(isImm() && "Invalid access!");
517     return Imm.Val;
518   }
519 
520   unsigned getVectorIndex() const {
521     assert(Kind == k_VectorIndex && "Invalid access!");
522     return VectorIndex.Val;
523   }
524 
525   ARM_MB::MemBOpt getMemBarrierOpt() const {
526     assert(Kind == k_MemBarrierOpt && "Invalid access!");
527     return MBOpt.Val;
528   }
529 
530   ARM_PROC::IFlags getProcIFlags() const {
531     assert(Kind == k_ProcIFlags && "Invalid access!");
532     return IFlags.Val;
533   }
534 
535   unsigned getMSRMask() const {
536     assert(Kind == k_MSRMask && "Invalid access!");
537     return MMask.Val;
538   }
539 
540   bool isCoprocNum() const { return Kind == k_CoprocNum; }
541   bool isCoprocReg() const { return Kind == k_CoprocReg; }
542   bool isCoprocOption() const { return Kind == k_CoprocOption; }
543   bool isCondCode() const { return Kind == k_CondCode; }
544   bool isCCOut() const { return Kind == k_CCOut; }
545   bool isITMask() const { return Kind == k_ITCondMask; }
546   bool isITCondCode() const { return Kind == k_CondCode; }
547   bool isImm() const { return Kind == k_Immediate; }
548   bool isFPImm() const {
549     if (!isImm()) return false;
550     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
551     if (!CE) return false;
552     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
553     return Val != -1;
554   }
555   bool isFBits16() const {
556     if (!isImm()) return false;
557     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
558     if (!CE) return false;
559     int64_t Value = CE->getValue();
560     return Value >= 0 && Value <= 16;
561   }
562   bool isFBits32() const {
563     if (!isImm()) return false;
564     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
565     if (!CE) return false;
566     int64_t Value = CE->getValue();
567     return Value >= 1 && Value <= 32;
568   }
569   bool isImm8s4() const {
570     if (!isImm()) return false;
571     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
572     if (!CE) return false;
573     int64_t Value = CE->getValue();
574     return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
575   }
576   bool isImm0_1020s4() const {
577     if (!isImm()) return false;
578     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
579     if (!CE) return false;
580     int64_t Value = CE->getValue();
581     return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
582   }
583   bool isImm0_508s4() const {
584     if (!isImm()) return false;
585     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586     if (!CE) return false;
587     int64_t Value = CE->getValue();
588     return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
589   }
590   bool isImm0_508s4Neg() const {
591     if (!isImm()) return false;
592     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
593     if (!CE) return false;
594     int64_t Value = -CE->getValue();
    // Explicitly exclude zero; we want that to use the normal 0_508 version.
596     return ((Value & 3) == 0) && Value > 0 && Value <= 508;
597   }
598   bool isImm0_255() const {
599     if (!isImm()) return false;
600     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
601     if (!CE) return false;
602     int64_t Value = CE->getValue();
603     return Value >= 0 && Value < 256;
604   }
605   bool isImm0_4095() const {
606     if (!isImm()) return false;
607     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608     if (!CE) return false;
609     int64_t Value = CE->getValue();
610     return Value >= 0 && Value < 4096;
611   }
612   bool isImm0_4095Neg() const {
613     if (!isImm()) return false;
614     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
615     if (!CE) return false;
616     int64_t Value = -CE->getValue();
617     return Value > 0 && Value < 4096;
618   }
619   bool isImm0_1() const {
620     if (!isImm()) return false;
621     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
622     if (!CE) return false;
623     int64_t Value = CE->getValue();
624     return Value >= 0 && Value < 2;
625   }
626   bool isImm0_3() const {
627     if (!isImm()) return false;
628     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
629     if (!CE) return false;
630     int64_t Value = CE->getValue();
631     return Value >= 0 && Value < 4;
632   }
633   bool isImm0_7() const {
634     if (!isImm()) return false;
635     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
636     if (!CE) return false;
637     int64_t Value = CE->getValue();
638     return Value >= 0 && Value < 8;
639   }
640   bool isImm0_15() const {
641     if (!isImm()) return false;
642     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
643     if (!CE) return false;
644     int64_t Value = CE->getValue();
645     return Value >= 0 && Value < 16;
646   }
647   bool isImm0_31() const {
648     if (!isImm()) return false;
649     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650     if (!CE) return false;
651     int64_t Value = CE->getValue();
652     return Value >= 0 && Value < 32;
653   }
654   bool isImm0_63() const {
655     if (!isImm()) return false;
656     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
657     if (!CE) return false;
658     int64_t Value = CE->getValue();
659     return Value >= 0 && Value < 64;
660   }
661   bool isImm8() const {
662     if (!isImm()) return false;
663     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
664     if (!CE) return false;
665     int64_t Value = CE->getValue();
666     return Value == 8;
667   }
668   bool isImm16() const {
669     if (!isImm()) return false;
670     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
671     if (!CE) return false;
672     int64_t Value = CE->getValue();
673     return Value == 16;
674   }
675   bool isImm32() const {
676     if (!isImm()) return false;
677     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
678     if (!CE) return false;
679     int64_t Value = CE->getValue();
680     return Value == 32;
681   }
682   bool isShrImm8() const {
683     if (!isImm()) return false;
684     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
685     if (!CE) return false;
686     int64_t Value = CE->getValue();
687     return Value > 0 && Value <= 8;
688   }
689   bool isShrImm16() const {
690     if (!isImm()) return false;
691     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
692     if (!CE) return false;
693     int64_t Value = CE->getValue();
694     return Value > 0 && Value <= 16;
695   }
696   bool isShrImm32() const {
697     if (!isImm()) return false;
698     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
699     if (!CE) return false;
700     int64_t Value = CE->getValue();
701     return Value > 0 && Value <= 32;
702   }
703   bool isShrImm64() const {
704     if (!isImm()) return false;
705     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706     if (!CE) return false;
707     int64_t Value = CE->getValue();
708     return Value > 0 && Value <= 64;
709   }
710   bool isImm1_7() const {
711     if (!isImm()) return false;
712     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
713     if (!CE) return false;
714     int64_t Value = CE->getValue();
715     return Value > 0 && Value < 8;
716   }
717   bool isImm1_15() const {
718     if (!isImm()) return false;
719     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
720     if (!CE) return false;
721     int64_t Value = CE->getValue();
722     return Value > 0 && Value < 16;
723   }
724   bool isImm1_31() const {
725     if (!isImm()) return false;
726     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
727     if (!CE) return false;
728     int64_t Value = CE->getValue();
729     return Value > 0 && Value < 32;
730   }
731   bool isImm1_16() const {
732     if (!isImm()) return false;
733     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
734     if (!CE) return false;
735     int64_t Value = CE->getValue();
736     return Value > 0 && Value < 17;
737   }
738   bool isImm1_32() const {
739     if (!isImm()) return false;
740     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741     if (!CE) return false;
742     int64_t Value = CE->getValue();
743     return Value > 0 && Value < 33;
744   }
745   bool isImm0_32() const {
746     if (!isImm()) return false;
747     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748     if (!CE) return false;
749     int64_t Value = CE->getValue();
750     return Value >= 0 && Value < 33;
751   }
752   bool isImm0_65535() const {
753     if (!isImm()) return false;
754     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755     if (!CE) return false;
756     int64_t Value = CE->getValue();
757     return Value >= 0 && Value < 65536;
758   }
759   bool isImm0_65535Expr() const {
760     if (!isImm()) return false;
761     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762     // If it's not a constant expression, it'll generate a fixup and be
763     // handled later.
764     if (!CE) return true;
765     int64_t Value = CE->getValue();
766     return Value >= 0 && Value < 65536;
767   }
768   bool isImm24bit() const {
769     if (!isImm()) return false;
770     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
771     if (!CE) return false;
772     int64_t Value = CE->getValue();
773     return Value >= 0 && Value <= 0xffffff;
774   }
775   bool isImmThumbSR() const {
776     if (!isImm()) return false;
777     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778     if (!CE) return false;
779     int64_t Value = CE->getValue();
780     return Value > 0 && Value < 33;
781   }
782   bool isPKHLSLImm() const {
783     if (!isImm()) return false;
784     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
785     if (!CE) return false;
786     int64_t Value = CE->getValue();
787     return Value >= 0 && Value < 32;
788   }
789   bool isPKHASRImm() const {
790     if (!isImm()) return false;
791     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
792     if (!CE) return false;
793     int64_t Value = CE->getValue();
794     return Value > 0 && Value <= 32;
795   }
796   bool isARMSOImm() const {
797     if (!isImm()) return false;
798     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
799     if (!CE) return false;
800     int64_t Value = CE->getValue();
801     return ARM_AM::getSOImmVal(Value) != -1;
802   }
803   bool isARMSOImmNot() const {
804     if (!isImm()) return false;
805     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
806     if (!CE) return false;
807     int64_t Value = CE->getValue();
808     return ARM_AM::getSOImmVal(~Value) != -1;
809   }
810   bool isARMSOImmNeg() const {
811     if (!isImm()) return false;
812     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
813     if (!CE) return false;
814     int64_t Value = CE->getValue();
815     // Only use this when not representable as a plain so_imm.
816     return ARM_AM::getSOImmVal(Value) == -1 &&
817       ARM_AM::getSOImmVal(-Value) != -1;
818   }
819   bool isT2SOImm() const {
820     if (!isImm()) return false;
821     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
822     if (!CE) return false;
823     int64_t Value = CE->getValue();
824     return ARM_AM::getT2SOImmVal(Value) != -1;
825   }
826   bool isT2SOImmNot() const {
827     if (!isImm()) return false;
828     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
829     if (!CE) return false;
830     int64_t Value = CE->getValue();
831     return ARM_AM::getT2SOImmVal(~Value) != -1;
832   }
833   bool isT2SOImmNeg() const {
834     if (!isImm()) return false;
835     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836     if (!CE) return false;
837     int64_t Value = CE->getValue();
    // Only use this when not representable as a plain t2_so_imm.
839     return ARM_AM::getT2SOImmVal(Value) == -1 &&
840       ARM_AM::getT2SOImmVal(-Value) != -1;
841   }
842   bool isSetEndImm() const {
843     if (!isImm()) return false;
844     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
845     if (!CE) return false;
846     int64_t Value = CE->getValue();
847     return Value == 1 || Value == 0;
848   }
849   bool isReg() const { return Kind == k_Register; }
850   bool isRegList() const { return Kind == k_RegisterList; }
851   bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
852   bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
853   bool isToken() const { return Kind == k_Token; }
854   bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
855   bool isMemory() const { return Kind == k_Memory; }
856   bool isShifterImm() const { return Kind == k_ShifterImmediate; }
857   bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
858   bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
859   bool isRotImm() const { return Kind == k_RotateImmediate; }
860   bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
861   bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
862   bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister &&
           PostIdxReg.ShiftTy == ARM_AM::no_shift;
864   }
865   bool isMemNoOffset(bool alignOK = false) const {
866     if (!isMemory())
867       return false;
868     // No offset of any kind.
869     return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
870      (alignOK || Memory.Alignment == 0);
871   }
872   bool isMemPCRelImm12() const {
873     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
874       return false;
875     // Base register must be PC.
876     if (Memory.BaseRegNum != ARM::PC)
877       return false;
878     // Immediate offset in range [-4095, 4095].
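    // (An offset value of INT32_MIN represents #-0.)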
879     if (!Memory.OffsetImm) return true;
880     int64_t Val = Memory.OffsetImm->getValue();
881     return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
882   }
883   bool isAlignedMemory() const {
884     return isMemNoOffset(true);
885   }
886   bool isAddrMode2() const {
887     if (!isMemory() || Memory.Alignment != 0) return false;
888     // Check for register offset.
889     if (Memory.OffsetRegNum) return true;
890     // Immediate offset in range [-4095, 4095].
891     if (!Memory.OffsetImm) return true;
892     int64_t Val = Memory.OffsetImm->getValue();
893     return Val > -4096 && Val < 4096;
894   }
895   bool isAM2OffsetImm() const {
896     if (!isImm()) return false;
897     // Immediate offset in range [-4095, 4095].
898     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
899     if (!CE) return false;
900     int64_t Val = CE->getValue();
901     return Val > -4096 && Val < 4096;
902   }
903   bool isAddrMode3() const {
904     // If we have an immediate that's not a constant, treat it as a label
905     // reference needing a fixup. If it is a constant, it's something else
906     // and we reject it.
907     if (isImm() && !isa<MCConstantExpr>(getImm()))
908       return true;
909     if (!isMemory() || Memory.Alignment != 0) return false;
910     // No shifts are legal for AM3.
911     if (Memory.ShiftType != ARM_AM::no_shift) return false;
912     // Check for register offset.
913     if (Memory.OffsetRegNum) return true;
914     // Immediate offset in range [-255, 255].
915     if (!Memory.OffsetImm) return true;
916     int64_t Val = Memory.OffsetImm->getValue();
917     return Val > -256 && Val < 256;
918   }
919   bool isAM3Offset() const {
920     if (Kind != k_Immediate && Kind != k_PostIndexRegister)
921       return false;
922     if (Kind == k_PostIndexRegister)
923       return PostIdxReg.ShiftTy == ARM_AM::no_shift;
924     // Immediate offset in range [-255, 255].
925     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
926     if (!CE) return false;
927     int64_t Val = CE->getValue();
928     // Special case, #-0 is INT32_MIN.
929     return (Val > -256 && Val < 256) || Val == INT32_MIN;
930   }
931   bool isAddrMode5() const {
932     // If we have an immediate that's not a constant, treat it as a label
933     // reference needing a fixup. If it is a constant, it's something else
934     // and we reject it.
935     if (isImm() && !isa<MCConstantExpr>(getImm()))
936       return true;
937     if (!isMemory() || Memory.Alignment != 0) return false;
938     // Check for register offset.
939     if (Memory.OffsetRegNum) return false;
940     // Immediate offset in range [-1020, 1020] and a multiple of 4.
941     if (!Memory.OffsetImm) return true;
942     int64_t Val = Memory.OffsetImm->getValue();
943     return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
944       Val == INT32_MIN;
945   }
946   bool isMemTBB() const {
947     if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
948         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
949       return false;
950     return true;
951   }
952   bool isMemTBH() const {
953     if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
954         Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0)
956       return false;
957     return true;
958   }
959   bool isMemRegOffset() const {
960     if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
961       return false;
962     return true;
963   }
964   bool isT2MemRegOffset() const {
965     if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
966         Memory.Alignment != 0)
967       return false;
968     // Only lsl #{0, 1, 2, 3} allowed.
969     if (Memory.ShiftType == ARM_AM::no_shift)
970       return true;
971     if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
972       return false;
973     return true;
974   }
975   bool isMemThumbRR() const {
976     // Thumb reg+reg addressing is simple. Just two registers, a base and
977     // an offset. No shifts, negations or any other complicating factors.
978     if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
979         Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
980       return false;
981     return isARMLowRegister(Memory.BaseRegNum) &&
982       (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
983   }
984   bool isMemThumbRIs4() const {
985     if (!isMemory() || Memory.OffsetRegNum != 0 ||
986         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
987       return false;
988     // Immediate offset, multiple of 4 in range [0, 124].
989     if (!Memory.OffsetImm) return true;
990     int64_t Val = Memory.OffsetImm->getValue();
991     return Val >= 0 && Val <= 124 && (Val % 4) == 0;
992   }
993   bool isMemThumbRIs2() const {
994     if (!isMemory() || Memory.OffsetRegNum != 0 ||
995         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
996       return false;
    // Immediate offset, multiple of 2 in range [0, 62].
998     if (!Memory.OffsetImm) return true;
999     int64_t Val = Memory.OffsetImm->getValue();
1000     return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1001   }
1002   bool isMemThumbRIs1() const {
1003     if (!isMemory() || Memory.OffsetRegNum != 0 ||
1004         !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1005       return false;
1006     // Immediate offset in range [0, 31].
1007     if (!Memory.OffsetImm) return true;
1008     int64_t Val = Memory.OffsetImm->getValue();
1009     return Val >= 0 && Val <= 31;
1010   }
1011   bool isMemThumbSPI() const {
1012     if (!isMemory() || Memory.OffsetRegNum != 0 ||
1013         Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1014       return false;
1015     // Immediate offset, multiple of 4 in range [0, 1020].
1016     if (!Memory.OffsetImm) return true;
1017     int64_t Val = Memory.OffsetImm->getValue();
1018     return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1019   }
1020   bool isMemImm8s4Offset() const {
1021     // If we have an immediate that's not a constant, treat it as a label
1022     // reference needing a fixup. If it is a constant, it's something else
1023     // and we reject it.
1024     if (isImm() && !isa<MCConstantExpr>(getImm()))
1025       return true;
1026     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1027       return false;
1028     // Immediate offset a multiple of 4 in range [-1020, 1020].
1029     if (!Memory.OffsetImm) return true;
1030     int64_t Val = Memory.OffsetImm->getValue();
1031     return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1032   }
1033   bool isMemImm0_1020s4Offset() const {
1034     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1035       return false;
1036     // Immediate offset a multiple of 4 in range [0, 1020].
1037     if (!Memory.OffsetImm) return true;
1038     int64_t Val = Memory.OffsetImm->getValue();
1039     return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1040   }
1041   bool isMemImm8Offset() const {
1042     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1043       return false;
1044     // Base reg of PC isn't allowed for these encodings.
1045     if (Memory.BaseRegNum == ARM::PC) return false;
1046     // Immediate offset in range [-255, 255].
1047     if (!Memory.OffsetImm) return true;
1048     int64_t Val = Memory.OffsetImm->getValue();
1049     return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1050   }
1051   bool isMemPosImm8Offset() const {
1052     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1053       return false;
1054     // Immediate offset in range [0, 255].
1055     if (!Memory.OffsetImm) return true;
1056     int64_t Val = Memory.OffsetImm->getValue();
1057     return Val >= 0 && Val < 256;
1058   }
1059   bool isMemNegImm8Offset() const {
1060     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1061       return false;
1062     // Base reg of PC isn't allowed for these encodings.
1063     if (Memory.BaseRegNum == ARM::PC) return false;
1064     // Immediate offset in range [-255, -1].
1065     if (!Memory.OffsetImm) return false;
1066     int64_t Val = Memory.OffsetImm->getValue();
1067     return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1068   }
1069   bool isMemUImm12Offset() const {
1070     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1071       return false;
1072     // Immediate offset in range [0, 4095].
1073     if (!Memory.OffsetImm) return true;
1074     int64_t Val = Memory.OffsetImm->getValue();
1075     return (Val >= 0 && Val < 4096);
1076   }
1077   bool isMemImm12Offset() const {
1078     // If we have an immediate that's not a constant, treat it as a label
1079     // reference needing a fixup. If it is a constant, it's something else
1080     // and we reject it.
1081     if (isImm() && !isa<MCConstantExpr>(getImm()))
1082       return true;
1083 
1084     if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1085       return false;
1086     // Immediate offset in range [-4095, 4095].
1087     if (!Memory.OffsetImm) return true;
1088     int64_t Val = Memory.OffsetImm->getValue();
1089     return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1090   }
1091   bool isPostIdxImm8() const {
1092     if (!isImm()) return false;
1093     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1094     if (!CE) return false;
1095     int64_t Val = CE->getValue();
1096     return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1097   }
1098   bool isPostIdxImm8s4() const {
1099     if (!isImm()) return false;
1100     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1101     if (!CE) return false;
1102     int64_t Val = CE->getValue();
1103     return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1104       (Val == INT32_MIN);
1105   }
1106 
1107   bool isMSRMask() const { return Kind == k_MSRMask; }
1108   bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1109 
1110   // NEON operands.
1111   bool isSingleSpacedVectorList() const {
1112     return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1113   }
1114   bool isDoubleSpacedVectorList() const {
1115     return Kind == k_VectorList && VectorList.isDoubleSpaced;
1116   }
1117   bool isVecListOneD() const {
1118     if (!isSingleSpacedVectorList()) return false;
1119     return VectorList.Count == 1;
1120   }
1121 
1122   bool isVecListDPair() const {
1123     if (!isSingleSpacedVectorList()) return false;
1124     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1125               .contains(VectorList.RegNum));
1126   }
1127 
1128   bool isVecListThreeD() const {
1129     if (!isSingleSpacedVectorList()) return false;
1130     return VectorList.Count == 3;
1131   }
1132 
1133   bool isVecListFourD() const {
1134     if (!isSingleSpacedVectorList()) return false;
1135     return VectorList.Count == 4;
1136   }
1137 
1138   bool isVecListDPairSpaced() const {
1139     if (isSingleSpacedVectorList()) return false;
1140     return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1141               .contains(VectorList.RegNum));
1142   }
1143 
1144   bool isVecListThreeQ() const {
1145     if (!isDoubleSpacedVectorList()) return false;
1146     return VectorList.Count == 3;
1147   }
1148 
1149   bool isVecListFourQ() const {
1150     if (!isDoubleSpacedVectorList()) return false;
1151     return VectorList.Count == 4;
1152   }
1153 
1154   bool isSingleSpacedVectorAllLanes() const {
1155     return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1156   }
1157   bool isDoubleSpacedVectorAllLanes() const {
1158     return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1159   }
1160   bool isVecListOneDAllLanes() const {
1161     if (!isSingleSpacedVectorAllLanes()) return false;
1162     return VectorList.Count == 1;
1163   }
1164 
1165   bool isVecListDPairAllLanes() const {
1166     if (!isSingleSpacedVectorAllLanes()) return false;
1167     return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1168               .contains(VectorList.RegNum));
1169   }
1170 
1171   bool isVecListDPairSpacedAllLanes() const {
1172     if (!isDoubleSpacedVectorAllLanes()) return false;
1173     return VectorList.Count == 2;
1174   }
1175 
1176   bool isVecListThreeDAllLanes() const {
1177     if (!isSingleSpacedVectorAllLanes()) return false;
1178     return VectorList.Count == 3;
1179   }
1180 
1181   bool isVecListThreeQAllLanes() const {
1182     if (!isDoubleSpacedVectorAllLanes()) return false;
1183     return VectorList.Count == 3;
1184   }
1185 
1186   bool isVecListFourDAllLanes() const {
1187     if (!isSingleSpacedVectorAllLanes()) return false;
1188     return VectorList.Count == 4;
1189   }
1190 
1191   bool isVecListFourQAllLanes() const {
1192     if (!isDoubleSpacedVectorAllLanes()) return false;
1193     return VectorList.Count == 4;
1194   }
1195 
1196   bool isSingleSpacedVectorIndexed() const {
1197     return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1198   }
1199   bool isDoubleSpacedVectorIndexed() const {
1200     return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1201   }
1202   bool isVecListOneDByteIndexed() const {
1203     if (!isSingleSpacedVectorIndexed()) return false;
1204     return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1205   }
1206 
1207   bool isVecListOneDHWordIndexed() const {
1208     if (!isSingleSpacedVectorIndexed()) return false;
1209     return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1210   }
1211 
1212   bool isVecListOneDWordIndexed() const {
1213     if (!isSingleSpacedVectorIndexed()) return false;
1214     return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1215   }
1216 
1217   bool isVecListTwoDByteIndexed() const {
1218     if (!isSingleSpacedVectorIndexed()) return false;
1219     return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1220   }
1221 
1222   bool isVecListTwoDHWordIndexed() const {
1223     if (!isSingleSpacedVectorIndexed()) return false;
1224     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1225   }
1226 
1227   bool isVecListTwoQWordIndexed() const {
1228     if (!isDoubleSpacedVectorIndexed()) return false;
1229     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1230   }
1231 
1232   bool isVecListTwoQHWordIndexed() const {
1233     if (!isDoubleSpacedVectorIndexed()) return false;
1234     return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1235   }
1236 
1237   bool isVecListTwoDWordIndexed() const {
1238     if (!isSingleSpacedVectorIndexed()) return false;
1239     return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1240   }
1241 
1242   bool isVecListThreeDByteIndexed() const {
1243     if (!isSingleSpacedVectorIndexed()) return false;
1244     return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1245   }
1246 
1247   bool isVecListThreeDHWordIndexed() const {
1248     if (!isSingleSpacedVectorIndexed()) return false;
1249     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1250   }
1251 
1252   bool isVecListThreeQWordIndexed() const {
1253     if (!isDoubleSpacedVectorIndexed()) return false;
1254     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1255   }
1256 
1257   bool isVecListThreeQHWordIndexed() const {
1258     if (!isDoubleSpacedVectorIndexed()) return false;
1259     return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1260   }
1261 
1262   bool isVecListThreeDWordIndexed() const {
1263     if (!isSingleSpacedVectorIndexed()) return false;
1264     return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1265   }
1266 
1267   bool isVecListFourDByteIndexed() const {
1268     if (!isSingleSpacedVectorIndexed()) return false;
1269     return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1270   }
1271 
1272   bool isVecListFourDHWordIndexed() const {
1273     if (!isSingleSpacedVectorIndexed()) return false;
1274     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1275   }
1276 
1277   bool isVecListFourQWordIndexed() const {
1278     if (!isDoubleSpacedVectorIndexed()) return false;
1279     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1280   }
1281 
1282   bool isVecListFourQHWordIndexed() const {
1283     if (!isDoubleSpacedVectorIndexed()) return false;
1284     return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1285   }
1286 
1287   bool isVecListFourDWordIndexed() const {
1288     if (!isSingleSpacedVectorIndexed()) return false;
1289     return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1290   }
1291 
1292   bool isVectorIndex8() const {
1293     if (Kind != k_VectorIndex) return false;
1294     return VectorIndex.Val < 8;
1295   }
1296   bool isVectorIndex16() const {
1297     if (Kind != k_VectorIndex) return false;
1298     return VectorIndex.Val < 4;
1299   }
1300   bool isVectorIndex32() const {
1301     if (Kind != k_VectorIndex) return false;
1302     return VectorIndex.Val < 2;
1303   }
1304 
1305   bool isNEONi8splat() const {
1306     if (!isImm()) return false;
1307     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308     // Must be a constant.
1309     if (!CE) return false;
1310     int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
1313     return Value >= 0 && Value < 256;
1314   }
1315 
1316   bool isNEONi16splat() const {
1317     if (!isImm()) return false;
1318     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1319     // Must be a constant.
1320     if (!CE) return false;
1321     int64_t Value = CE->getValue();
1322     // i16 value in the range [0,255] or [0x0100, 0xff00]
1323     return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1324   }
1325 
1326   bool isNEONi32splat() const {
1327     if (!isImm()) return false;
1328     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1329     // Must be a constant.
1330     if (!CE) return false;
1331     int64_t Value = CE->getValue();
1332     // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1333     return (Value >= 0 && Value < 256) ||
1334       (Value >= 0x0100 && Value <= 0xff00) ||
1335       (Value >= 0x010000 && Value <= 0xff0000) ||
1336       (Value >= 0x01000000 && Value <= 0xff000000);
1337   }
1338 
1339   bool isNEONi32vmov() const {
1340     if (!isImm()) return false;
1341     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1342     // Must be a constant.
1343     if (!CE) return false;
1344     int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
    // for VMOV/VMVN only, values of the form 0xXXff and 0xXXffff (low
    // byte(s) all ones) are also accepted.
1347     return (Value >= 0 && Value < 256) ||
1348       (Value >= 0x0100 && Value <= 0xff00) ||
1349       (Value >= 0x010000 && Value <= 0xff0000) ||
1350       (Value >= 0x01000000 && Value <= 0xff000000) ||
1351       (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1352       (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1353   }
1354   bool isNEONi32vmovNeg() const {
1355     if (!isImm()) return false;
1356     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1357     // Must be a constant.
1358     if (!CE) return false;
1359     int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
    // for VMOV/VMVN only, values of the form 0xXXff and 0xXXffff (low
    // byte(s) all ones) are also accepted.
1362     return (Value >= 0 && Value < 256) ||
1363       (Value >= 0x0100 && Value <= 0xff00) ||
1364       (Value >= 0x010000 && Value <= 0xff0000) ||
1365       (Value >= 0x01000000 && Value <= 0xff000000) ||
1366       (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1367       (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1368   }
1369 
1370   bool isNEONi64splat() const {
1371     if (!isImm()) return false;
1372     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1373     // Must be a constant.
1374     if (!CE) return false;
1375     uint64_t Value = CE->getValue();
1376     // i64 value with each byte being either 0 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1378       if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1379     return true;
1380   }
1381 
1382   void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1383     // Add as immediates when possible.  Null MCExpr = 0.
1384     if (Expr == 0)
1385       Inst.addOperand(MCOperand::CreateImm(0));
1386     else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1387       Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1388     else
1389       Inst.addOperand(MCOperand::CreateExpr(Expr));
1390   }
1391 
1392   void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1393     assert(N == 2 && "Invalid number of operands!");
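    // A condition code is modeled as two operands: the condition itself and
    // the predicate register (CPSR when conditional, no register for AL).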
1394     Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1395     unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1396     Inst.addOperand(MCOperand::CreateReg(RegNum));
1397   }
1398 
1399   void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1400     assert(N == 1 && "Invalid number of operands!");
1401     Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1402   }
1403 
1404   void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1405     assert(N == 1 && "Invalid number of operands!");
1406     Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1407   }
1408 
1409   void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1410     assert(N == 1 && "Invalid number of operands!");
1411     Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1412   }
1413 
1414   void addITMaskOperands(MCInst &Inst, unsigned N) const {
1415     assert(N == 1 && "Invalid number of operands!");
1416     Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1417   }
1418 
1419   void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1420     assert(N == 1 && "Invalid number of operands!");
1421     Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1422   }
1423 
1424   void addCCOutOperands(MCInst &Inst, unsigned N) const {
1425     assert(N == 1 && "Invalid number of operands!");
1426     Inst.addOperand(MCOperand::CreateReg(getReg()));
1427   }
1428 
1429   void addRegOperands(MCInst &Inst, unsigned N) const {
1430     assert(N == 1 && "Invalid number of operands!");
1431     Inst.addOperand(MCOperand::CreateReg(getReg()));
1432   }
1433 
1434   void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1435     assert(N == 3 && "Invalid number of operands!");
1436     assert(isRegShiftedReg() &&
1437            "addRegShiftedRegOperands() on non RegShiftedReg!");
1438     Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1439     Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1440     Inst.addOperand(MCOperand::CreateImm(
1441       ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1442   }
1443 
1444   void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1445     assert(N == 2 && "Invalid number of operands!");
1446     assert(isRegShiftedImm() &&
1447            "addRegShiftedImmOperands() on non RegShiftedImm!");
1448     Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1449     // Shift of #32 is encoded as 0 where permitted
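    // (e.g. 'lsr #32' and 'asr #32' use a shift amount of 0 in the encoding).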
1450     unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1451     Inst.addOperand(MCOperand::CreateImm(
1452       ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1453   }
1454 
1455   void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1456     assert(N == 1 && "Invalid number of operands!");
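    // Pack the shift kind and amount into one immediate: bit 5 is set for
    // asr (clear for lsl) and the low bits hold the shift amount.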
1457     Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1458                                          ShifterImm.Imm));
1459   }
1460 
1461   void addRegListOperands(MCInst &Inst, unsigned N) const {
1462     assert(N == 1 && "Invalid number of operands!");
1463     const SmallVectorImpl<unsigned> &RegList = getRegList();
1464     for (SmallVectorImpl<unsigned>::const_iterator
1465            I = RegList.begin(), E = RegList.end(); I != E; ++I)
1466       Inst.addOperand(MCOperand::CreateReg(*I));
1467   }
1468 
1469   void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1470     addRegListOperands(Inst, N);
1471   }
1472 
1473   void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1474     addRegListOperands(Inst, N);
1475   }
1476 
1477   void addRotImmOperands(MCInst &Inst, unsigned N) const {
1478     assert(N == 1 && "Invalid number of operands!");
1479     // Encoded as val>>3. The printer handles display as 8, 16, 24.
1480     Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1481   }
1482 
1483   void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1484     assert(N == 1 && "Invalid number of operands!");
1485     // Munge the lsb/width into a bitfield mask.
1486     unsigned lsb = Bitfield.LSB;
1487     unsigned width = Bitfield.Width;
1488     // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
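    // For example, lsb == 8 and width == 8 yield the mask 0xffff00ff.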
1489     uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1490                       (32 - (lsb + width)));
1491     Inst.addOperand(MCOperand::CreateImm(Mask));
1492   }
1493 
1494   void addImmOperands(MCInst &Inst, unsigned N) const {
1495     assert(N == 1 && "Invalid number of operands!");
1496     addExpr(Inst, getImm());
1497   }
1498 
1499   void addFBits16Operands(MCInst &Inst, unsigned N) const {
1500     assert(N == 1 && "Invalid number of operands!");
1501     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1502     Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1503   }
1504 
1505   void addFBits32Operands(MCInst &Inst, unsigned N) const {
1506     assert(N == 1 && "Invalid number of operands!");
1507     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1508     Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1509   }
1510 
1511   void addFPImmOperands(MCInst &Inst, unsigned N) const {
1512     assert(N == 1 && "Invalid number of operands!");
1513     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1514     int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1515     Inst.addOperand(MCOperand::CreateImm(Val));
1516   }
1517 
1518   void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1519     assert(N == 1 && "Invalid number of operands!");
1520     // FIXME: We really want to scale the value here, but the LDRD/STRD
1521     // instructions don't encode operands that way yet.
1522     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1523     Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1524   }
1525 
1526   void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1527     assert(N == 1 && "Invalid number of operands!");
1528     // The immediate is scaled by four in the encoding and is stored
1529     // in the MCInst as such. Lop off the low two bits here.
1530     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1531     Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1532   }
1533 
1534   void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1535     assert(N == 1 && "Invalid number of operands!");
1536     // The immediate is scaled by four in the encoding and is stored
1537     // in the MCInst as such. Lop off the low two bits here.
1538     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1539     Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1540   }
1541 
1542   void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1543     assert(N == 1 && "Invalid number of operands!");
1544     // The immediate is scaled by four in the encoding and is stored
1545     // in the MCInst as such. Lop off the low two bits here.
1546     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1547     Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1548   }
1549 
1550   void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1551     assert(N == 1 && "Invalid number of operands!");
1552     // The constant encodes as the immediate-1, and we store in the instruction
1553     // the bits as encoded, so subtract off one here.
1554     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1555     Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1556   }
1557 
1558   void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1559     assert(N == 1 && "Invalid number of operands!");
1560     // The constant encodes as the immediate-1, and we store in the instruction
1561     // the bits as encoded, so subtract off one here.
1562     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1563     Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1564   }
1565 
1566   void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1567     assert(N == 1 && "Invalid number of operands!");
1568     // The constant encodes as the immediate, except for 32, which encodes as
1569     // zero.
1570     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1571     unsigned Imm = CE->getValue();
1572     Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1573   }
1574 
1575   void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1576     assert(N == 1 && "Invalid number of operands!");
1577     // An ASR value of 32 encodes as 0, so that's how we want to add it to
1578     // the instruction as well.
1579     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1580     int Val = CE->getValue();
1581     Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1582   }
1583 
1584   void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1585     assert(N == 1 && "Invalid number of operands!");
1586     // The operand is actually a t2_so_imm, but we have its bitwise
1587     // negation in the assembly source, so twiddle it here.
1588     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1589     Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1590   }
1591 
1592   void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1593     assert(N == 1 && "Invalid number of operands!");
1594     // The operand is actually a t2_so_imm, but we have its
1595     // negation in the assembly source, so twiddle it here.
1596     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1597     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1598   }
1599 
1600   void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1601     assert(N == 1 && "Invalid number of operands!");
1602     // The operand is actually an imm0_4095, but we have its
1603     // negation in the assembly source, so twiddle it here.
1604     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1605     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1606   }
1607 
1608   void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1609     assert(N == 1 && "Invalid number of operands!");
1610     // The operand is actually a so_imm, but we have its bitwise
1611     // negation in the assembly source, so twiddle it here.
1612     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1613     Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1614   }
1615 
1616   void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1617     assert(N == 1 && "Invalid number of operands!");
1618     // The operand is actually a so_imm, but we have its
1619     // negation in the assembly source, so twiddle it here.
1620     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1621     Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1622   }
1623 
1624   void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1625     assert(N == 1 && "Invalid number of operands!");
1626     Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1627   }
1628 
1629   void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1630     assert(N == 1 && "Invalid number of operands!");
1631     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1632   }
1633 
1634   void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1635     assert(N == 1 && "Invalid number of operands!");
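    // Only the signed offset is emitted; the PC base register is implicit in
    // the instruction.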
1636     int32_t Imm = Memory.OffsetImm->getValue();
1637     // FIXME: Handle #-0
1638     if (Imm == INT32_MIN) Imm = 0;
1639     Inst.addOperand(MCOperand::CreateImm(Imm));
1640   }
1641 
1642   void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1643     assert(N == 2 && "Invalid number of operands!");
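    // The address is a (base register, alignment) pair; the alignment is in
    // bytes, with 0 meaning no alignment was written in the source.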
1644     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1645     Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1646   }
1647 
1648   void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1649     assert(N == 3 && "Invalid number of operands!");
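    // Addrmode2 is three operands: base register, offset register (0 if the
    // offset is an immediate) and an AM2 opcode word built by getAM2Opc(),
    // which packs the add/sub flag, offset/shift amount and shift kind.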
1650     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1651     if (!Memory.OffsetRegNum) {
1652       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1653       // Special case for #-0
1654       if (Val == INT32_MIN) Val = 0;
1655       if (Val < 0) Val = -Val;
1656       Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1657     } else {
1658       // For register offset, we encode the shift type and negation flag
1659       // here.
1660       Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1661                               Memory.ShiftImm, Memory.ShiftType);
1662     }
1663     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1664     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1665     Inst.addOperand(MCOperand::CreateImm(Val));
1666   }
1667 
1668   void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1669     assert(N == 2 && "Invalid number of operands!");
1670     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1671     assert(CE && "non-constant AM2OffsetImm operand!");
1672     int32_t Val = CE->getValue();
1673     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1674     // Special case for #-0
1675     if (Val == INT32_MIN) Val = 0;
1676     if (Val < 0) Val = -Val;
1677     Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1678     Inst.addOperand(MCOperand::CreateReg(0));
1679     Inst.addOperand(MCOperand::CreateImm(Val));
1680   }
1681 
1682   void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1683     assert(N == 3 && "Invalid number of operands!");
1684     // If we have an immediate that's not a constant, treat it as a label
1685     // reference needing a fixup. If it is a constant, it's something else
1686     // and we reject it.
1687     if (isImm()) {
1688       Inst.addOperand(MCOperand::CreateExpr(getImm()));
1689       Inst.addOperand(MCOperand::CreateReg(0));
1690       Inst.addOperand(MCOperand::CreateImm(0));
1691       return;
1692     }
1693 
1694     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1695     if (!Memory.OffsetRegNum) {
1696       ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1697       // Special case for #-0
1698       if (Val == INT32_MIN) Val = 0;
1699       if (Val < 0) Val = -Val;
1700       Val = ARM_AM::getAM3Opc(AddSub, Val);
1701     } else {
1702       // For register offset, only the add/sub (negation) flag is encoded
1703       // here; addrmode3 has no shift.
1704       Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1705     }
1706     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1707     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1708     Inst.addOperand(MCOperand::CreateImm(Val));
1709   }
1710 
1711   void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1712     assert(N == 2 && "Invalid number of operands!");
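    // An AM3 post-index offset is either a register with an add/sub flag or
    // an 8-bit immediate; both forms are emitted as a (register, AM3 opcode)
    // pair, with register 0 standing in for the immediate form.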
1713     if (Kind == k_PostIndexRegister) {
1714       int32_t Val =
1715         ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1716       Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1717       Inst.addOperand(MCOperand::CreateImm(Val));
1718       return;
1719     }
1720 
1721     // Constant offset.
1722     const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1723     int32_t Val = CE->getValue();
1724     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1725     // Special case for #-0
1726     if (Val == INT32_MIN) Val = 0;
1727     if (Val < 0) Val = -Val;
1728     Val = ARM_AM::getAM3Opc(AddSub, Val);
1729     Inst.addOperand(MCOperand::CreateReg(0));
1730     Inst.addOperand(MCOperand::CreateImm(Val));
1731   }
1732 
1733   void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1734     assert(N == 2 && "Invalid number of operands!");
1735     // If we have an immediate that's not a constant, treat it as a label
1736     // reference needing a fixup. If it is a constant, it's something else
1737     // and we reject it.
1738     if (isImm()) {
1739       Inst.addOperand(MCOperand::CreateExpr(getImm()));
1740       Inst.addOperand(MCOperand::CreateImm(0));
1741       return;
1742     }
1743 
1744     // The lower two bits are always zero and as such are not encoded.
1745     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1746     ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1747     // Special case for #-0
1748     if (Val == INT32_MIN) Val = 0;
1749     if (Val < 0) Val = -Val;
1750     Val = ARM_AM::getAM5Opc(AddSub, Val);
1751     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1752     Inst.addOperand(MCOperand::CreateImm(Val));
1753   }
1754 
1755   void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1756     assert(N == 2 && "Invalid number of operands!");
1757     // If we have an immediate that's not a constant, treat it as a label
1758     // reference needing a fixup. If it is a constant, it's something else
1759     // and we reject it.
1760     if (isImm()) {
1761       Inst.addOperand(MCOperand::CreateExpr(getImm()));
1762       Inst.addOperand(MCOperand::CreateImm(0));
1763       return;
1764     }
1765 
1766     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1767     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1768     Inst.addOperand(MCOperand::CreateImm(Val));
1769   }
1770 
1771   void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1772     assert(N == 2 && "Invalid number of operands!");
1773     // The lower two bits are always zero and as such are not encoded.
1774     int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1775     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1776     Inst.addOperand(MCOperand::CreateImm(Val));
1777   }
1778 
1779   void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1780     assert(N == 2 && "Invalid number of operands!");
1781     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1782     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1783     Inst.addOperand(MCOperand::CreateImm(Val));
1784   }
1785 
1786   void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1787     addMemImm8OffsetOperands(Inst, N);
1788   }
1789 
1790   void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1791     addMemImm8OffsetOperands(Inst, N);
1792   }
1793 
1794   void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1795     assert(N == 2 && "Invalid number of operands!");
1796     // If this is an immediate, it's a label reference.
1797     if (isImm()) {
1798       addExpr(Inst, getImm());
1799       Inst.addOperand(MCOperand::CreateImm(0));
1800       return;
1801     }
1802 
1803     // Otherwise, it's a normal memory reg+offset.
1804     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1805     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1806     Inst.addOperand(MCOperand::CreateImm(Val));
1807   }
1808 
1809   void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1810     assert(N == 2 && "Invalid number of operands!");
1811     // If this is an immediate, it's a label reference.
1812     if (isImm()) {
1813       addExpr(Inst, getImm());
1814       Inst.addOperand(MCOperand::CreateImm(0));
1815       return;
1816     }
1817 
1818     // Otherwise, it's a normal memory reg+offset.
1819     int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1820     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1821     Inst.addOperand(MCOperand::CreateImm(Val));
1822   }
1823 
1824   void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1825     assert(N == 2 && "Invalid number of operands!");
1826     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1827     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1828   }
1829 
1830   void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1831     assert(N == 2 && "Invalid number of operands!");
1832     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1833     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1834   }
1835 
1836   void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1837     assert(N == 3 && "Invalid number of operands!");
1838     unsigned Val =
1839       ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1840                         Memory.ShiftImm, Memory.ShiftType);
1841     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1842     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1843     Inst.addOperand(MCOperand::CreateImm(Val));
1844   }
1845 
1846   void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1847     assert(N == 3 && "Invalid number of operands!");
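    // Thumb2 register offsets only allow a left shift, so the raw shift
    // amount is emitted directly rather than an AM2 opcode word.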
1848     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1849     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1850     Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1851   }
1852 
1853   void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1854     assert(N == 2 && "Invalid number of operands!");
1855     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1856     Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1857   }
1858 
1859   void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1860     assert(N == 2 && "Invalid number of operands!");
1861     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1862     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1863     Inst.addOperand(MCOperand::CreateImm(Val));
1864   }
1865 
1866   void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1867     assert(N == 2 && "Invalid number of operands!");
1868     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1869     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1870     Inst.addOperand(MCOperand::CreateImm(Val));
1871   }
1872 
1873   void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1874     assert(N == 2 && "Invalid number of operands!");
1875     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1876     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1877     Inst.addOperand(MCOperand::CreateImm(Val));
1878   }
1879 
1880   void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1881     assert(N == 2 && "Invalid number of operands!");
1882     int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1883     Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1884     Inst.addOperand(MCOperand::CreateImm(Val));
1885   }
1886 
1887   void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1888     assert(N == 1 && "Invalid number of operands!");
1889     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1890     assert(CE && "non-constant post-idx-imm8 operand!");
1891     int Imm = CE->getValue();
1892     bool isAdd = Imm >= 0;
1893     if (Imm == INT32_MIN) Imm = 0;
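    // The low 8 bits hold the magnitude; bit 8 holds the add/sub flag.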
1894     Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1895     Inst.addOperand(MCOperand::CreateImm(Imm));
1896   }
1897 
1898   void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1899     assert(N == 1 && "Invalid number of operands!");
1900     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1901     assert(CE && "non-constant post-idx-imm8s4 operand!");
1902     int Imm = CE->getValue();
1903     bool isAdd = Imm >= 0;
1904     if (Imm == INT32_MIN) Imm = 0;
1905     // Immediate is scaled by 4.
1906     Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1907     Inst.addOperand(MCOperand::CreateImm(Imm));
1908   }
1909 
1910   void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1911     assert(N == 2 && "Invalid number of operands!");
1912     Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1913     Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1914   }
1915 
1916   void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1917     assert(N == 2 && "Invalid number of operands!");
1918     Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1919     // The sign, shift type, and shift amount are encoded in a single operand
1920     // using the AM2 encoding helpers.
1921     ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1922     unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1923                                      PostIdxReg.ShiftTy);
1924     Inst.addOperand(MCOperand::CreateImm(Imm));
1925   }
1926 
1927   void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1928     assert(N == 1 && "Invalid number of operands!");
1929     Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1930   }
1931 
1932   void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1933     assert(N == 1 && "Invalid number of operands!");
1934     Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1935   }
1936 
1937   void addVecListOperands(MCInst &Inst, unsigned N) const {
1938     assert(N == 1 && "Invalid number of operands!");
1939     Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1940   }
1941 
1942   void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1943     assert(N == 2 && "Invalid number of operands!");
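    // A lane-indexed list becomes two operands: the first register of the
    // list and the lane number.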
1944     Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1945     Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1946   }
1947 
1948   void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1949     assert(N == 1 && "Invalid number of operands!");
1950     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1951   }
1952 
1953   void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1954     assert(N == 1 && "Invalid number of operands!");
1955     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1956   }
1957 
1958   void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1959     assert(N == 1 && "Invalid number of operands!");
1960     Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1961   }
1962 
1963   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1964     assert(N == 1 && "Invalid number of operands!");
1965     // The immediate encodes the type of constant as well as the value.
1966     // Mask in that this is an i8 splat.
1967     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1968     Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1969   }
1970 
1971   void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1972     assert(N == 1 && "Invalid number of operands!");
1973     // The immediate encodes the type of constant as well as the value.
1974     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1975     unsigned Value = CE->getValue();
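    // Bits [11:8] of the result select the NEON cmode: 0x8 places the byte
    // in the low half of the i16, 0xa in the high half.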
1976     if (Value >= 256)
1977       Value = (Value >> 8) | 0xa00;
1978     else
1979       Value |= 0x800;
1980     Inst.addOperand(MCOperand::CreateImm(Value));
1981   }
1982 
1983   void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1984     assert(N == 1 && "Invalid number of operands!");
1985     // The immediate encodes the type of constant as well as the value.
1986     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1987     unsigned Value = CE->getValue();
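    // Values are reduced to a single byte plus a cmode selector in bits
    // [11:8]: 0x0, 0x2, 0x4 or 0x6 for a splat byte in i32 byte 0, 1, 2 or 3.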
1988     if (Value >= 256 && Value <= 0xff00)
1989       Value = (Value >> 8) | 0x200;
1990     else if (Value > 0xffff && Value <= 0xff0000)
1991       Value = (Value >> 16) | 0x400;
1992     else if (Value > 0xffffff)
1993       Value = (Value >> 24) | 0x600;
1994     Inst.addOperand(MCOperand::CreateImm(Value));
1995   }
1996 
1997   void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1998     assert(N == 1 && "Invalid number of operands!");
1999     // The immediate encodes the type of constant as well as the value.
2000     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2001     unsigned Value = CE->getValue();
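    // Like the i32 splat, but vmov also accepts the "bytes below are all
    // ones" forms, selected by cmode 0xc (0x..XXff) and 0xd (0x..XXffff).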
2002     if (Value >= 256 && Value <= 0xffff)
2003       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2004     else if (Value > 0xffff && Value <= 0xffffff)
2005       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2006     else if (Value > 0xffffff)
2007       Value = (Value >> 24) | 0x600;
2008     Inst.addOperand(MCOperand::CreateImm(Value));
2009   }
2010 
2011   void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2012     assert(N == 1 && "Invalid number of operands!");
2013     // The immediate encodes the type of constant as well as the value.
2014     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2015     unsigned Value = ~CE->getValue();
2016     if (Value >= 256 && Value <= 0xffff)
2017       Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2018     else if (Value > 0xffff && Value <= 0xffffff)
2019       Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2020     else if (Value > 0xffffff)
2021       Value = (Value >> 24) | 0x600;
2022     Inst.addOperand(MCOperand::CreateImm(Value));
2023   }
2024 
2025   void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2026     assert(N == 1 && "Invalid number of operands!");
2027     // The immediate encodes the type of constant as well as the value.
2028     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2029     uint64_t Value = CE->getValue();
2030     unsigned Imm = 0;
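    // Collapse each all-zeros/all-ones byte of the i64 into one bit of an
    // 8-bit immediate (1 for 0xff, 0 for 0x00).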
2031     for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2032       Imm |= (Value & 1) << i;
2033     }
2034     Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2035   }
2036 
2037   virtual void print(raw_ostream &OS) const;
2038 
2039   static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2040     ARMOperand *Op = new ARMOperand(k_ITCondMask);
2041     Op->ITMask.Mask = Mask;
2042     Op->StartLoc = S;
2043     Op->EndLoc = S;
2044     return Op;
2045   }
2046 
2047   static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2048     ARMOperand *Op = new ARMOperand(k_CondCode);
2049     Op->CC.Val = CC;
2050     Op->StartLoc = S;
2051     Op->EndLoc = S;
2052     return Op;
2053   }
2054 
2055   static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2056     ARMOperand *Op = new ARMOperand(k_CoprocNum);
2057     Op->Cop.Val = CopVal;
2058     Op->StartLoc = S;
2059     Op->EndLoc = S;
2060     return Op;
2061   }
2062 
2063   static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2064     ARMOperand *Op = new ARMOperand(k_CoprocReg);
2065     Op->Cop.Val = CopVal;
2066     Op->StartLoc = S;
2067     Op->EndLoc = S;
2068     return Op;
2069   }
2070 
2071   static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2072     ARMOperand *Op = new ARMOperand(k_CoprocOption);
2073     Op->Cop.Val = Val;
2074     Op->StartLoc = S;
2075     Op->EndLoc = E;
2076     return Op;
2077   }
2078 
2079   static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2080     ARMOperand *Op = new ARMOperand(k_CCOut);
2081     Op->Reg.RegNum = RegNum;
2082     Op->StartLoc = S;
2083     Op->EndLoc = S;
2084     return Op;
2085   }
2086 
2087   static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2088     ARMOperand *Op = new ARMOperand(k_Token);
2089     Op->Tok.Data = Str.data();
2090     Op->Tok.Length = Str.size();
2091     Op->StartLoc = S;
2092     Op->EndLoc = S;
2093     return Op;
2094   }
2095 
2096   static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2097     ARMOperand *Op = new ARMOperand(k_Register);
2098     Op->Reg.RegNum = RegNum;
2099     Op->StartLoc = S;
2100     Op->EndLoc = E;
2101     return Op;
2102   }
2103 
2104   static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2105                                            unsigned SrcReg,
2106                                            unsigned ShiftReg,
2107                                            unsigned ShiftImm,
2108                                            SMLoc S, SMLoc E) {
2109     ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2110     Op->RegShiftedReg.ShiftTy = ShTy;
2111     Op->RegShiftedReg.SrcReg = SrcReg;
2112     Op->RegShiftedReg.ShiftReg = ShiftReg;
2113     Op->RegShiftedReg.ShiftImm = ShiftImm;
2114     Op->StartLoc = S;
2115     Op->EndLoc = E;
2116     return Op;
2117   }
2118 
2119   static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2120                                             unsigned SrcReg,
2121                                             unsigned ShiftImm,
2122                                             SMLoc S, SMLoc E) {
2123     ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2124     Op->RegShiftedImm.ShiftTy = ShTy;
2125     Op->RegShiftedImm.SrcReg = SrcReg;
2126     Op->RegShiftedImm.ShiftImm = ShiftImm;
2127     Op->StartLoc = S;
2128     Op->EndLoc = E;
2129     return Op;
2130   }
2131 
2132   static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2133                                    SMLoc S, SMLoc E) {
2134     ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2135     Op->ShifterImm.isASR = isASR;
2136     Op->ShifterImm.Imm = Imm;
2137     Op->StartLoc = S;
2138     Op->EndLoc = E;
2139     return Op;
2140   }
2141 
2142   static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2143     ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2144     Op->RotImm.Imm = Imm;
2145     Op->StartLoc = S;
2146     Op->EndLoc = E;
2147     return Op;
2148   }
2149 
2150   static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2151                                     SMLoc S, SMLoc E) {
2152     ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2153     Op->Bitfield.LSB = LSB;
2154     Op->Bitfield.Width = Width;
2155     Op->StartLoc = S;
2156     Op->EndLoc = E;
2157     return Op;
2158   }
2159 
2160   static ARMOperand *
2161   CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2162                 SMLoc StartLoc, SMLoc EndLoc) {
2163     KindTy Kind = k_RegisterList;
2164 
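    // Infer the list kind from the class of the first register; GPR lists
    // are the default.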
2165     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2166       Kind = k_DPRRegisterList;
2167     else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2168              contains(Regs.front().first))
2169       Kind = k_SPRRegisterList;
2170 
2171     ARMOperand *Op = new ARMOperand(Kind);
2172     for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2173            I = Regs.begin(), E = Regs.end(); I != E; ++I)
2174       Op->Registers.push_back(I->first);
2175     array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2176     Op->StartLoc = StartLoc;
2177     Op->EndLoc = EndLoc;
2178     return Op;
2179   }
2180 
2181   static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2182                                       bool isDoubleSpaced, SMLoc S, SMLoc E) {
2183     ARMOperand *Op = new ARMOperand(k_VectorList);
2184     Op->VectorList.RegNum = RegNum;
2185     Op->VectorList.Count = Count;
2186     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2187     Op->StartLoc = S;
2188     Op->EndLoc = E;
2189     return Op;
2190   }
2191 
2192   static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2193                                               bool isDoubleSpaced,
2194                                               SMLoc S, SMLoc E) {
2195     ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2196     Op->VectorList.RegNum = RegNum;
2197     Op->VectorList.Count = Count;
2198     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2199     Op->StartLoc = S;
2200     Op->EndLoc = E;
2201     return Op;
2202   }
2203 
2204   static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2205                                              unsigned Index,
2206                                              bool isDoubleSpaced,
2207                                              SMLoc S, SMLoc E) {
2208     ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2209     Op->VectorList.RegNum = RegNum;
2210     Op->VectorList.Count = Count;
2211     Op->VectorList.LaneIndex = Index;
2212     Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2213     Op->StartLoc = S;
2214     Op->EndLoc = E;
2215     return Op;
2216   }
2217 
2218   static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2219                                        MCContext &Ctx) {
2220     ARMOperand *Op = new ARMOperand(k_VectorIndex);
2221     Op->VectorIndex.Val = Idx;
2222     Op->StartLoc = S;
2223     Op->EndLoc = E;
2224     return Op;
2225   }
2226 
2227   static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2228     ARMOperand *Op = new ARMOperand(k_Immediate);
2229     Op->Imm.Val = Val;
2230     Op->StartLoc = S;
2231     Op->EndLoc = E;
2232     return Op;
2233   }
2234 
2235   static ARMOperand *CreateMem(unsigned BaseRegNum,
2236                                const MCConstantExpr *OffsetImm,
2237                                unsigned OffsetRegNum,
2238                                ARM_AM::ShiftOpc ShiftType,
2239                                unsigned ShiftImm,
2240                                unsigned Alignment,
2241                                bool isNegative,
2242                                SMLoc S, SMLoc E) {
2243     ARMOperand *Op = new ARMOperand(k_Memory);
2244     Op->Memory.BaseRegNum = BaseRegNum;
2245     Op->Memory.OffsetImm = OffsetImm;
2246     Op->Memory.OffsetRegNum = OffsetRegNum;
2247     Op->Memory.ShiftType = ShiftType;
2248     Op->Memory.ShiftImm = ShiftImm;
2249     Op->Memory.Alignment = Alignment;
2250     Op->Memory.isNegative = isNegative;
2251     Op->StartLoc = S;
2252     Op->EndLoc = E;
2253     return Op;
2254   }
2255 
2256   static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2257                                       ARM_AM::ShiftOpc ShiftTy,
2258                                       unsigned ShiftImm,
2259                                       SMLoc S, SMLoc E) {
2260     ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2261     Op->PostIdxReg.RegNum = RegNum;
2262     Op->PostIdxReg.isAdd = isAdd;
2263     Op->PostIdxReg.ShiftTy = ShiftTy;
2264     Op->PostIdxReg.ShiftImm = ShiftImm;
2265     Op->StartLoc = S;
2266     Op->EndLoc = E;
2267     return Op;
2268   }
2269 
2270   static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2271     ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2272     Op->MBOpt.Val = Opt;
2273     Op->StartLoc = S;
2274     Op->EndLoc = S;
2275     return Op;
2276   }
2277 
2278   static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2279     ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2280     Op->IFlags.Val = IFlags;
2281     Op->StartLoc = S;
2282     Op->EndLoc = S;
2283     return Op;
2284   }
2285 
2286   static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2287     ARMOperand *Op = new ARMOperand(k_MSRMask);
2288     Op->MMask.Val = MMask;
2289     Op->StartLoc = S;
2290     Op->EndLoc = S;
2291     return Op;
2292   }
2293 };
2294 
2295 } // end anonymous namespace.
2296 
2297 void ARMOperand::print(raw_ostream &OS) const {
2298   switch (Kind) {
2299   case k_CondCode:
2300     OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2301     break;
2302   case k_CCOut:
2303     OS << "<ccout " << getReg() << ">";
2304     break;
2305   case k_ITCondMask: {
2306     static const char *MaskStr[] = {
2307       "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2308       "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2309     };
2310     assert((ITMask.Mask & 0xf) == ITMask.Mask);
2311     OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2312     break;
2313   }
2314   case k_CoprocNum:
2315     OS << "<coprocessor number: " << getCoproc() << ">";
2316     break;
2317   case k_CoprocReg:
2318     OS << "<coprocessor register: " << getCoproc() << ">";
2319     break;
2320   case k_CoprocOption:
2321     OS << "<coprocessor option: " << CoprocOption.Val << ">";
2322     break;
2323   case k_MSRMask:
2324     OS << "<mask: " << getMSRMask() << ">";
2325     break;
2326   case k_Immediate:
2327     getImm()->print(OS);
2328     break;
2329   case k_MemBarrierOpt:
2330     OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2331     break;
2332   case k_Memory:
2333     OS << "<memory "
2334        << " base:" << Memory.BaseRegNum;
2335     OS << ">";
2336     break;
2337   case k_PostIndexRegister:
2338     OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2339        << PostIdxReg.RegNum;
2340     if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2341       OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2342          << PostIdxReg.ShiftImm;
2343     OS << ">";
2344     break;
2345   case k_ProcIFlags: {
2346     OS << "<ARM_PROC::";
2347     unsigned IFlags = getProcIFlags();
2348     for (int i=2; i >= 0; --i)
2349       if (IFlags & (1 << i))
2350         OS << ARM_PROC::IFlagsToString(1 << i);
2351     OS << ">";
2352     break;
2353   }
2354   case k_Register:
2355     OS << "<register " << getReg() << ">";
2356     break;
2357   case k_ShifterImmediate:
2358     OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2359        << " #" << ShifterImm.Imm << ">";
2360     break;
2361   case k_ShiftedRegister:
2362     OS << "<so_reg_reg "
2363        << RegShiftedReg.SrcReg << " "
2364        << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2365        << " " << RegShiftedReg.ShiftReg << ">";
2366     break;
2367   case k_ShiftedImmediate:
2368     OS << "<so_reg_imm "
2369        << RegShiftedImm.SrcReg << " "
2370        << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2371        << " #" << RegShiftedImm.ShiftImm << ">";
2372     break;
2373   case k_RotateImmediate:
2374     OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2375     break;
2376   case k_BitfieldDescriptor:
2377     OS << "<bitfield " << "lsb: " << Bitfield.LSB
2378        << ", width: " << Bitfield.Width << ">";
2379     break;
2380   case k_RegisterList:
2381   case k_DPRRegisterList:
2382   case k_SPRRegisterList: {
2383     OS << "<register_list ";
2384 
2385     const SmallVectorImpl<unsigned> &RegList = getRegList();
2386     for (SmallVectorImpl<unsigned>::const_iterator
2387            I = RegList.begin(), E = RegList.end(); I != E; ) {
2388       OS << *I;
2389       if (++I < E) OS << ", ";
2390     }
2391 
2392     OS << ">";
2393     break;
2394   }
2395   case k_VectorList:
2396     OS << "<vector_list " << VectorList.Count << " * "
2397        << VectorList.RegNum << ">";
2398     break;
2399   case k_VectorListAllLanes:
2400     OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2401        << VectorList.RegNum << ">";
2402     break;
2403   case k_VectorListIndexed:
2404     OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2405        << VectorList.Count << " * " << VectorList.RegNum << ">";
2406     break;
2407   case k_Token:
2408     OS << "'" << getToken() << "'";
2409     break;
2410   case k_VectorIndex:
2411     OS << "<vectorindex " << getVectorIndex() << ">";
2412     break;
2413   }
2414 }
2415 
2416 /// @name Auto-generated Match Functions
2417 /// {
2418 
2419 static unsigned MatchRegisterName(StringRef Name);
2420 
2421 /// }
2422 
2423 bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2424                                  SMLoc &StartLoc, SMLoc &EndLoc) {
2425   StartLoc = Parser.getTok().getLoc();
2426   RegNo = tryParseRegister();
2427   EndLoc = Parser.getTok().getLoc();
2428 
2429   return (RegNo == (unsigned)-1);
2430 }
2431 
2432 /// Try to parse a register name.  The token must be an Identifier when called,
2433 /// and if it is a register name, the token is eaten and the register number is
2434 /// returned.  Otherwise return -1.
2435 ///
2436 int ARMAsmParser::tryParseRegister() {
2437   const AsmToken &Tok = Parser.getTok();
2438   if (Tok.isNot(AsmToken::Identifier)) return -1;
2439 
2440   std::string lowerCase = Tok.getString().lower();
2441   unsigned RegNum = MatchRegisterName(lowerCase);
2442   if (!RegNum) {
2443     RegNum = StringSwitch<unsigned>(lowerCase)
2444       .Case("r13", ARM::SP)
2445       .Case("r14", ARM::LR)
2446       .Case("r15", ARM::PC)
2447       .Case("ip", ARM::R12)
2448       // Additional register name aliases for 'gas' compatibility.
2449       .Case("a1", ARM::R0)
2450       .Case("a2", ARM::R1)
2451       .Case("a3", ARM::R2)
2452       .Case("a4", ARM::R3)
2453       .Case("v1", ARM::R4)
2454       .Case("v2", ARM::R5)
2455       .Case("v3", ARM::R6)
2456       .Case("v4", ARM::R7)
2457       .Case("v5", ARM::R8)
2458       .Case("v6", ARM::R9)
2459       .Case("v7", ARM::R10)
2460       .Case("v8", ARM::R11)
2461       .Case("sb", ARM::R9)
2462       .Case("sl", ARM::R10)
2463       .Case("fp", ARM::R11)
2464       .Default(0);
2465   }
2466   if (!RegNum) {
2467     // Check for aliases registered via .req. Canonicalize to lower case.
2468     // That's more consistent since register names are case insensitive, and
2469     // it's how the original entry was passed in from MC/MCParser/AsmParser.
2470     StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2471     // If no match, return failure.
2472     if (Entry == RegisterReqs.end())
2473       return -1;
2474     Parser.Lex(); // Eat identifier token.
2475     return Entry->getValue();
2476   }
2477 
2478   Parser.Lex(); // Eat identifier token.
2479 
2480   return RegNum;
2481 }
2482 
2483 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2484 // If a recoverable error occurs, return 1. If an irrecoverable error
2485 // occurs, return -1. An irrecoverable error is one where tokens have been
2486 // consumed in the process of trying to parse the shifter (i.e., when it is
2487 // indeed a shifter operand, but malformed).
2488 int ARMAsmParser::tryParseShiftRegister(
2489                                SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2490   SMLoc S = Parser.getTok().getLoc();
2491   const AsmToken &Tok = Parser.getTok();
2492   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2493 
2494   std::string lowerCase = Tok.getString().lower();
2495   ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2496       .Case("asl", ARM_AM::lsl)
2497       .Case("lsl", ARM_AM::lsl)
2498       .Case("lsr", ARM_AM::lsr)
2499       .Case("asr", ARM_AM::asr)
2500       .Case("ror", ARM_AM::ror)
2501       .Case("rrx", ARM_AM::rrx)
2502       .Default(ARM_AM::no_shift);
2503 
2504   if (ShiftTy == ARM_AM::no_shift)
2505     return 1;
2506 
2507   Parser.Lex(); // Eat the operator.
2508 
2509   // The source register for the shift has already been added to the
2510   // operand list, so we need to pop it off and combine it into the shifted
2511   // register operand instead.
2512   OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2513   if (!PrevOp->isReg())
2514     return Error(PrevOp->getStartLoc(), "shift must be of a register");
2515   int SrcReg = PrevOp->getReg();
2516   int64_t Imm = 0;
2517   int ShiftReg = 0;
2518   if (ShiftTy == ARM_AM::rrx) {
2519     // RRX doesn't have an explicit shift amount. The encoder expects
2520     // the shift register to be the same as the source register. Seems odd,
2521     // but OK.
2522     ShiftReg = SrcReg;
2523   } else {
2524     // Figure out if this is shifted by a constant or a register (for non-RRX).
2525     if (Parser.getTok().is(AsmToken::Hash) ||
2526         Parser.getTok().is(AsmToken::Dollar)) {
2527       Parser.Lex(); // Eat hash.
2528       SMLoc ImmLoc = Parser.getTok().getLoc();
2529       const MCExpr *ShiftExpr = 0;
2530       if (getParser().ParseExpression(ShiftExpr)) {
2531         Error(ImmLoc, "invalid immediate shift value");
2532         return -1;
2533       }
2534       // The expression must be evaluatable as an immediate.
2535       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2536       if (!CE) {
2537         Error(ImmLoc, "invalid immediate shift value");
2538         return -1;
2539       }
2540       // Range check the immediate.
2541       // lsl, ror: 0 <= imm <= 31
2542       // lsr, asr: 0 <= imm <= 32
2543       Imm = CE->getValue();
2544       if (Imm < 0 ||
2545           ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2546           ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2547         Error(ImmLoc, "immediate shift value out of range");
2548         return -1;
2549       }
2550       // Shift by zero is a nop. Always send it through as lsl
2551       // ('as' compatibility).
2552       if (Imm == 0)
2553         ShiftTy = ARM_AM::lsl;
2554     } else if (Parser.getTok().is(AsmToken::Identifier)) {
2555       ShiftReg = tryParseRegister();
2556       SMLoc L = Parser.getTok().getLoc();
2557       if (ShiftReg == -1) {
2558         Error(L, "expected immediate or register in shift operand");
2559         return -1;
2560       }
2561     } else {
2562       Error(Parser.getTok().getLoc(),
2563             "expected immediate or register in shift operand");
2564       return -1;
2565     }
2566   }
2567 
2568   if (ShiftReg && ShiftTy != ARM_AM::rrx)
2569     Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2570                                                          ShiftReg, Imm,
2571                                                S, Parser.getTok().getLoc()));
2572   else
2573     Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2574                                                S, Parser.getTok().getLoc()));
2575 
2576   return 0;
2577 }
2578 
2579 
2580 /// Try to parse a register name.  The token must be an Identifier when called.
2581 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
2582 /// if there is a "writeback". Returns 'true' if it's not a register.
2583 ///
2584 /// TODO this is likely to change to allow different register types and or to
2585 /// parse for a specific register type.
2586 bool ARMAsmParser::
2587 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2588   SMLoc S = Parser.getTok().getLoc();
2589   int RegNo = tryParseRegister();
2590   if (RegNo == -1)
2591     return true;
2592 
2593   Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2594 
2595   const AsmToken &ExclaimTok = Parser.getTok();
2596   if (ExclaimTok.is(AsmToken::Exclaim)) {
2597     Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2598                                                ExclaimTok.getLoc()));
2599     Parser.Lex(); // Eat exclaim token
2600     return false;
2601   }
2602 
2603   // Also check for an index operand. This is only legal for vector registers,
2604   // but that'll get caught OK in operand matching, so we don't need to
2605   // explicitly filter everything else out here.
2606   if (Parser.getTok().is(AsmToken::LBrac)) {
2607     SMLoc SIdx = Parser.getTok().getLoc();
2608     Parser.Lex(); // Eat left bracket token.
2609 
2610     const MCExpr *ImmVal;
2611     if (getParser().ParseExpression(ImmVal))
2612       return true;
2613     const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2614     if (!MCE)
2615       return TokError("immediate value expected for vector index");
2616 
2617     SMLoc E = Parser.getTok().getLoc();
2618     if (Parser.getTok().isNot(AsmToken::RBrac))
2619       return Error(E, "']' expected");
2620 
2621     Parser.Lex(); // Eat right bracket token.
2622 
2623     Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2624                                                      SIdx, E,
2625                                                      getContext()));
2626   }
2627 
2628   return false;
2629 }
2630 
2631 /// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2632 /// instruction's symbolic operand name. Examples: "p1", "p7", "c3",
2633 /// "c5", ...
2634 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2635   // Use the same layout as the tablegen'erated register name matcher. Ugly,
2636   // but efficient.
2637   switch (Name.size()) {
2638   default: return -1;
2639   case 2:
2640     if (Name[0] != CoprocOp)
2641       return -1;
2642     switch (Name[1]) {
2643     default:  return -1;
2644     case '0': return 0;
2645     case '1': return 1;
2646     case '2': return 2;
2647     case '3': return 3;
2648     case '4': return 4;
2649     case '5': return 5;
2650     case '6': return 6;
2651     case '7': return 7;
2652     case '8': return 8;
2653     case '9': return 9;
2654     }
2655   case 3:
2656     if (Name[0] != CoprocOp || Name[1] != '1')
2657       return -1;
2658     switch (Name[2]) {
2659     default:  return -1;
2660     case '0': return 10;
2661     case '1': return 11;
2662     case '2': return 12;
2663     case '3': return 13;
2664     case '4': return 14;
2665     case '5': return 15;
2666     }
2667   }
2668 }
2669 
2670 /// parseITCondCode - Try to parse a condition code for an IT instruction.
2671 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2672 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2673   SMLoc S = Parser.getTok().getLoc();
2674   const AsmToken &Tok = Parser.getTok();
2675   if (!Tok.is(AsmToken::Identifier))
2676     return MatchOperand_NoMatch;
2677   unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2678     .Case("eq", ARMCC::EQ)
2679     .Case("ne", ARMCC::NE)
2680     .Case("hs", ARMCC::HS)
2681     .Case("cs", ARMCC::HS)
2682     .Case("lo", ARMCC::LO)
2683     .Case("cc", ARMCC::LO)
2684     .Case("mi", ARMCC::MI)
2685     .Case("pl", ARMCC::PL)
2686     .Case("vs", ARMCC::VS)
2687     .Case("vc", ARMCC::VC)
2688     .Case("hi", ARMCC::HI)
2689     .Case("ls", ARMCC::LS)
2690     .Case("ge", ARMCC::GE)
2691     .Case("lt", ARMCC::LT)
2692     .Case("gt", ARMCC::GT)
2693     .Case("le", ARMCC::LE)
2694     .Case("al", ARMCC::AL)
2695     .Default(~0U);
2696   if (CC == ~0U)
2697     return MatchOperand_NoMatch;
2698   Parser.Lex(); // Eat the token.
2699 
2700   Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2701 
2702   return MatchOperand_Success;
2703 }
2704 
2705 /// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2706 /// token must be an Identifier when called, and if it is a coprocessor
2707 /// number, the token is eaten and the operand is added to the operand list.
2708 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2709 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2710   SMLoc S = Parser.getTok().getLoc();
2711   const AsmToken &Tok = Parser.getTok();
2712   if (Tok.isNot(AsmToken::Identifier))
2713     return MatchOperand_NoMatch;
2714 
2715   int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2716   if (Num == -1)
2717     return MatchOperand_NoMatch;
2718 
2719   Parser.Lex(); // Eat identifier token.
2720   Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2721   return MatchOperand_Success;
2722 }
2723 
2724 /// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2725 /// token must be an Identifier when called, and if it is a coprocessor
2726 /// register, the token is eaten and the operand is added to the operand list.
2727 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2728 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2729   SMLoc S = Parser.getTok().getLoc();
2730   const AsmToken &Tok = Parser.getTok();
2731   if (Tok.isNot(AsmToken::Identifier))
2732     return MatchOperand_NoMatch;
2733 
2734   int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2735   if (Reg == -1)
2736     return MatchOperand_NoMatch;
2737 
2738   Parser.Lex(); // Eat identifier token.
2739   Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2740   return MatchOperand_Success;
2741 }
2742 
2743 /// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2744 /// coproc_option : '{' imm0_255 '}'
2745 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2746 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2747   SMLoc S = Parser.getTok().getLoc();
2748 
2749   // If this isn't a '{', this isn't a coprocessor immediate operand.
2750   if (Parser.getTok().isNot(AsmToken::LCurly))
2751     return MatchOperand_NoMatch;
2752   Parser.Lex(); // Eat the '{'
2753 
2754   const MCExpr *Expr;
2755   SMLoc Loc = Parser.getTok().getLoc();
2756   if (getParser().ParseExpression(Expr)) {
2757     Error(Loc, "illegal expression");
2758     return MatchOperand_ParseFail;
2759   }
2760   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2761   if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2762     Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2763     return MatchOperand_ParseFail;
2764   }
2765   int Val = CE->getValue();
2766 
2767   // Check for and consume the closing '}'
2768   if (Parser.getTok().isNot(AsmToken::RCurly))
2769     return MatchOperand_ParseFail;
2770   SMLoc E = Parser.getTok().getLoc();
2771   Parser.Lex(); // Eat the '}'
2772 
2773   Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2774   return MatchOperand_Success;
2775 }
2776 
2777 // For register list parsing, we need to map from raw GPR register numbering
2778 // to the enumeration values. The enumeration values aren't sorted by
2779 // register number due to our using "sp", "lr" and "pc" as canonical names.
2780 static unsigned getNextRegister(unsigned Reg) {
2781   // If this is a GPR, we need to do it manually, otherwise we can rely
2782   // on the sort ordering of the enumeration since the other reg-classes
2783   // are sane.
2784   if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2785     return Reg + 1;
2786   switch(Reg) {
2787   default: llvm_unreachable("Invalid GPR number!");
2788   case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2789   case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2790   case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2791   case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2792   case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2793   case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2794   case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2795   case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2796   }
2797 }
2798 
2799 // Return the low-subreg of a given Q register.
2800 static unsigned getDRegFromQReg(unsigned QReg) {
2801   switch (QReg) {
2802   default: llvm_unreachable("expected a Q register!");
2803   case ARM::Q0:  return ARM::D0;
2804   case ARM::Q1:  return ARM::D2;
2805   case ARM::Q2:  return ARM::D4;
2806   case ARM::Q3:  return ARM::D6;
2807   case ARM::Q4:  return ARM::D8;
2808   case ARM::Q5:  return ARM::D10;
2809   case ARM::Q6:  return ARM::D12;
2810   case ARM::Q7:  return ARM::D14;
2811   case ARM::Q8:  return ARM::D16;
2812   case ARM::Q9:  return ARM::D18;
2813   case ARM::Q10: return ARM::D20;
2814   case ARM::Q11: return ARM::D22;
2815   case ARM::Q12: return ARM::D24;
2816   case ARM::Q13: return ARM::D26;
2817   case ARM::Q14: return ARM::D28;
2818   case ARM::Q15: return ARM::D30;
2819   }
2820 }
2821 
2822 /// Parse a register list.
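/// Illustrative operand forms (assumed, not exhaustive):
///     {r0, r2-r5, lr}    GPR list, e.g. for LDM/STM/PUSH/POP
///     {d0-d3}            DPR list, e.g. for VLDM/VSTM/VPUSH/VPOP
///     {q0, q1}           Q registers are expanded to their D sub-registers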
2823 bool ARMAsmParser::
2824 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2825   assert(Parser.getTok().is(AsmToken::LCurly) &&
2826          "Token is not a Left Curly Brace");
2827   SMLoc S = Parser.getTok().getLoc();
2828   Parser.Lex(); // Eat '{' token.
2829   SMLoc RegLoc = Parser.getTok().getLoc();
2830 
2831   // Check the first register in the list to see what register class
2832   // this is a list of.
2833   int Reg = tryParseRegister();
2834   if (Reg == -1)
2835     return Error(RegLoc, "register expected");
2836 
2837   // The reglist instructions have at most 16 registers, so reserve
2838   // space for that many.
2839   SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2840 
2841   // Allow Q regs and just interpret them as the two D sub-registers.
2842   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2843     Reg = getDRegFromQReg(Reg);
2844     Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2845     ++Reg;
2846   }
2847   const MCRegisterClass *RC;
2848   if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2849     RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2850   else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2851     RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2852   else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2853     RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2854   else
2855     return Error(RegLoc, "invalid register in register list");
2856 
2857   // Store the register.
2858   Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2859 
2860   // This starts immediately after the first register token in the list,
2861   // so we can see either a comma or a minus (range separator) as a legal
2862   // next token.
2863   while (Parser.getTok().is(AsmToken::Comma) ||
2864          Parser.getTok().is(AsmToken::Minus)) {
2865     if (Parser.getTok().is(AsmToken::Minus)) {
2866       Parser.Lex(); // Eat the minus.
2867       SMLoc EndLoc = Parser.getTok().getLoc();
2868       int EndReg = tryParseRegister();
2869       if (EndReg == -1)
2870         return Error(EndLoc, "register expected");
2871       // Allow Q regs and just interpret them as the two D sub-registers.
2872       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2873         EndReg = getDRegFromQReg(EndReg) + 1;
2874       // If the register is the same as the start reg, there's nothing
2875       // more to do.
2876       if (Reg == EndReg)
2877         continue;
2878       // The register must be in the same register class as the first.
2879       if (!RC->contains(EndReg))
2880         return Error(EndLoc, "invalid register in register list");
2881       // Ranges must go from low to high.
2882       if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2883         return Error(EndLoc, "bad range in register list");
2884 
2885       // Add all the registers in the range to the register list.
2886       while (Reg != EndReg) {
2887         Reg = getNextRegister(Reg);
2888         Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2889       }
2890       continue;
2891     }
2892     Parser.Lex(); // Eat the comma.
2893     RegLoc = Parser.getTok().getLoc();
2894     int OldReg = Reg;
2895     const AsmToken RegTok = Parser.getTok();
2896     Reg = tryParseRegister();
2897     if (Reg == -1)
2898       return Error(RegLoc, "register expected");
2899     // Allow Q regs and just interpret them as the two D sub-registers.
2900     bool isQReg = false;
2901     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2902       Reg = getDRegFromQReg(Reg);
2903       isQReg = true;
2904     }
2905     // The register must be in the same register class as the first.
2906     if (!RC->contains(Reg))
2907       return Error(RegLoc, "invalid register in register list");
2908     // List must be monotonically increasing.
2909     if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2910       if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2911         Warning(RegLoc, "register list not in ascending order");
2912       else
2913         return Error(RegLoc, "register list not in ascending order");
2914     }
2915     if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2916       Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2917               ") in register list");
2918       continue;
2919     }
2920     // VFP register lists must also be contiguous.
2921     // It's OK to use the enumeration values directly here, as the
2922     // VFP register classes have the enum sorted properly.
2923     if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2924         Reg != OldReg + 1)
2925       return Error(RegLoc, "non-contiguous register range");
2926     Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2927     if (isQReg)
2928       Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2929   }
2930 
2931   SMLoc E = Parser.getTok().getLoc();
2932   if (Parser.getTok().isNot(AsmToken::RCurly))
2933     return Error(E, "'}' expected");
2934   Parser.Lex(); // Eat '}' token.
2935 
2936   // Push the register list operand.
2937   Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2938 
2939   // The ARM system instruction variants for LDM/STM have a '^' token here.
2940   if (Parser.getTok().is(AsmToken::Caret)) {
2941     Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2942     Parser.Lex(); // Eat '^' token.
2943   }
2944 
2945   return false;
2946 }
2947 
2948 // Helper function to parse the lane index for vector lists.
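// Illustrative lane syntaxes (assumed, not exhaustive):
//     d0[]    all lanes
//     d0[2]   a single indexed lane (an optional '#', as in d0[#2], is also
//             tolerated since inline assembly tends to emit one)
// Anything else is treated as NoLanes.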
2949 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2950 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2951   Index = 0; // Always return a defined index value.
2952   if (Parser.getTok().is(AsmToken::LBrac)) {
2953     Parser.Lex(); // Eat the '['.
2954     if (Parser.getTok().is(AsmToken::RBrac)) {
2955       // "Dn[]" is the 'all lanes' syntax.
2956       LaneKind = AllLanes;
2957       Parser.Lex(); // Eat the ']'.
2958       return MatchOperand_Success;
2959     }
2960 
2961     // There's an optional '#' token here. Normally there wouldn't be, but
2962     // inline assembly puts one in, and it's friendly to accept that.
2963     if (Parser.getTok().is(AsmToken::Hash))
2964       Parser.Lex(); // Eat the '#'
2965 
2966     const MCExpr *LaneIndex;
2967     SMLoc Loc = Parser.getTok().getLoc();
2968     if (getParser().ParseExpression(LaneIndex)) {
2969       Error(Loc, "illegal expression");
2970       return MatchOperand_ParseFail;
2971     }
2972     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2973     if (!CE) {
2974       Error(Loc, "lane index must be empty or an integer");
2975       return MatchOperand_ParseFail;
2976     }
2977     if (Parser.getTok().isNot(AsmToken::RBrac)) {
2978       Error(Parser.getTok().getLoc(), "']' expected");
2979       return MatchOperand_ParseFail;
2980     }
2981     Parser.Lex(); // Eat the ']'.
2982     int64_t Val = CE->getValue();
2983 
2984     // FIXME: Make this range check context sensitive for .8, .16, .32.
2985     if (Val < 0 || Val > 7) {
2986       Error(Parser.getTok().getLoc(), "lane index out of range");
2987       return MatchOperand_ParseFail;
2988     }
2989     Index = Val;
2990     LaneKind = IndexedLane;
2991     return MatchOperand_Success;
2992   }
2993   LaneKind = NoLanes;
2994   return MatchOperand_Success;
2995 }
2996 
2997 // Parse a vector register list.
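// Illustrative list forms (assumed, not exhaustive):
//     {d0, d1}   {d0-d3}   {d0[], d1[]}   {d4[1], d6[1]}   {q0}
// and, as a gas-compatible extension, a bare d0 or q0 without braces.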
2998 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2999 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3000   VectorLaneTy LaneKind;
3001   unsigned LaneIndex;
3002   SMLoc S = Parser.getTok().getLoc();
3003   // As an extension (to match gas), support a plain D register or Q register
3004   // (without enclosing curly braces) as a single- or double-entry list,
3005   // respectively.
3006   if (Parser.getTok().is(AsmToken::Identifier)) {
3007     int Reg = tryParseRegister();
3008     if (Reg == -1)
3009       return MatchOperand_NoMatch;
3010     SMLoc E = Parser.getTok().getLoc();
3011     if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3012       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3013       if (Res != MatchOperand_Success)
3014         return Res;
3015       switch (LaneKind) {
3016       case NoLanes:
3017         E = Parser.getTok().getLoc();
3018         Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3019         break;
3020       case AllLanes:
3021         E = Parser.getTok().getLoc();
3022         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3023                                                                 S, E));
3024         break;
3025       case IndexedLane:
3026         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3027                                                                LaneIndex,
3028                                                                false, S, E));
3029         break;
3030       }
3031       return MatchOperand_Success;
3032     }
3033     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3034       Reg = getDRegFromQReg(Reg);
3035       OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3036       if (Res != MatchOperand_Success)
3037         return Res;
3038       switch (LaneKind) {
3039       case NoLanes:
3040         E = Parser.getTok().getLoc();
3041         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3042                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3043         Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3044         break;
3045       case AllLanes:
3046         E = Parser.getTok().getLoc();
3047         Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3048                                    &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3049         Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3050                                                                 S, E));
3051         break;
3052       case IndexedLane:
3053         Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3054                                                                LaneIndex,
3055                                                                false, S, E));
3056         break;
3057       }
3058       return MatchOperand_Success;
3059     }
3060     Error(S, "vector register expected");
3061     return MatchOperand_ParseFail;
3062   }
3063 
3064   if (Parser.getTok().isNot(AsmToken::LCurly))
3065     return MatchOperand_NoMatch;
3066 
3067   Parser.Lex(); // Eat '{' token.
3068   SMLoc RegLoc = Parser.getTok().getLoc();
3069 
3070   int Reg = tryParseRegister();
3071   if (Reg == -1) {
3072     Error(RegLoc, "register expected");
3073     return MatchOperand_ParseFail;
3074   }
3075   unsigned Count = 1;
3076   int Spacing = 0;
3077   unsigned FirstReg = Reg;
3078   // The list is of D registers, but we also allow Q regs and just interpret
3079   // them as the two D sub-registers.
3080   if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3081     FirstReg = Reg = getDRegFromQReg(Reg);
3082     Spacing = 1; // double-spacing requires explicit D registers, otherwise
3083                  // it's ambiguous with four-register single spaced.
3084     ++Reg;
3085     ++Count;
3086   }
3087   if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3088     return MatchOperand_ParseFail;
3089 
3090   while (Parser.getTok().is(AsmToken::Comma) ||
3091          Parser.getTok().is(AsmToken::Minus)) {
3092     if (Parser.getTok().is(AsmToken::Minus)) {
3093       if (!Spacing)
3094         Spacing = 1; // Register range implies a single spaced list.
3095       else if (Spacing == 2) {
3096         Error(Parser.getTok().getLoc(),
3097               "sequential registers in double spaced list");
3098         return MatchOperand_ParseFail;
3099       }
3100       Parser.Lex(); // Eat the minus.
3101       SMLoc EndLoc = Parser.getTok().getLoc();
3102       int EndReg = tryParseRegister();
3103       if (EndReg == -1) {
3104         Error(EndLoc, "register expected");
3105         return MatchOperand_ParseFail;
3106       }
3107       // Allow Q regs and just interpret them as the two D sub-registers.
3108       if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3109         EndReg = getDRegFromQReg(EndReg) + 1;
3110       // If the register is the same as the start reg, there's nothing
3111       // more to do.
3112       if (Reg == EndReg)
3113         continue;
3114       // The register must be in the same register class as the first.
3115       if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3116         Error(EndLoc, "invalid register in register list");
3117         return MatchOperand_ParseFail;
3118       }
3119       // Ranges must go from low to high.
3120       if (Reg > EndReg) {
3121         Error(EndLoc, "bad range in register list");
3122         return MatchOperand_ParseFail;
3123       }
3124       // Parse the lane specifier if present.
3125       VectorLaneTy NextLaneKind;
3126       unsigned NextLaneIndex;
3127       if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3128         return MatchOperand_ParseFail;
3129       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3130         Error(EndLoc, "mismatched lane index in register list");
3131         return MatchOperand_ParseFail;
3132       }
3133       EndLoc = Parser.getTok().getLoc();
3134 
3135       // Add all the registers in the range to the register list.
3136       Count += EndReg - Reg;
3137       Reg = EndReg;
3138       continue;
3139     }
3140     Parser.Lex(); // Eat the comma.
3141     RegLoc = Parser.getTok().getLoc();
3142     int OldReg = Reg;
3143     Reg = tryParseRegister();
3144     if (Reg == -1) {
3145       Error(RegLoc, "register expected");
3146       return MatchOperand_ParseFail;
3147     }
3148     // Vector register lists must be contiguous.
3149     // It's OK to use the enumeration values directly here, as the
3150     // VFP register classes have the enum sorted properly.
3151     //
3152     // The list is of D registers, but we also allow Q regs and just interpret
3153     // them as the two D sub-registers.
3154     if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3155       if (!Spacing)
3156         Spacing = 1; // Register range implies a single spaced list.
3157       else if (Spacing == 2) {
3158         Error(RegLoc,
3159               "invalid register in double-spaced list (must be a 'D' register)");
3160         return MatchOperand_ParseFail;
3161       }
3162       Reg = getDRegFromQReg(Reg);
3163       if (Reg != OldReg + 1) {
3164         Error(RegLoc, "non-contiguous register range");
3165         return MatchOperand_ParseFail;
3166       }
3167       ++Reg;
3168       Count += 2;
3169       // Parse the lane specifier if present.
3170       VectorLaneTy NextLaneKind;
3171       unsigned NextLaneIndex;
3172       SMLoc EndLoc = Parser.getTok().getLoc();
3173       if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3174         return MatchOperand_ParseFail;
3175       if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3176         Error(EndLoc, "mismatched lane index in register list");
3177         return MatchOperand_ParseFail;
3178       }
3179       continue;
3180     }
3181     // Normal D register.
3182     // Figure out the register spacing (single or double) of the list if
3183     // we don't know it already.
3184     if (!Spacing)
3185       Spacing = 1 + (Reg == OldReg + 2);
3186 
3187     // Just check that it's contiguous and keep going.
3188     if (Reg != OldReg + Spacing) {
3189       Error(RegLoc, "non-contiguous register range");
3190       return MatchOperand_ParseFail;
3191     }
3192     ++Count;
3193     // Parse the lane specifier if present.
3194     VectorLaneTy NextLaneKind;
3195     unsigned NextLaneIndex;
3196     SMLoc EndLoc = Parser.getTok().getLoc();
3197     if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3198       return MatchOperand_ParseFail;
3199     if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3200       Error(EndLoc, "mismatched lane index in register list");
3201       return MatchOperand_ParseFail;
3202     }
3203   }
3204 
3205   SMLoc E = Parser.getTok().getLoc();
3206   if (Parser.getTok().isNot(AsmToken::RCurly)) {
3207     Error(E, "'}' expected");
3208     return MatchOperand_ParseFail;
3209   }
3210   Parser.Lex(); // Eat '}' token.
3211 
3212   switch (LaneKind) {
3213   case NoLanes:
3214     // Two-register operands are converted to the
3215     // composite register classes.
3216     if (Count == 2) {
3217       const MCRegisterClass *RC = (Spacing == 1) ?
3218         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3219         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3220       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3221     }
3222 
3223     Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3224                                                     (Spacing == 2), S, E));
3225     break;
3226   case AllLanes:
3227     // Two-register operands are converted to the
3228     // composite register classes.
3229     if (Count == 2) {
3230       const MCRegisterClass *RC = (Spacing == 1) ?
3231         &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3232         &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3233       FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3234     }
3235     Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3236                                                             (Spacing == 2),
3237                                                             S, E));
3238     break;
3239   case IndexedLane:
3240     Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3241                                                            LaneIndex,
3242                                                            (Spacing == 2),
3243                                                            S, E));
3244     break;
3245   }
3246   return MatchOperand_Success;
3247 }
3248 
3249 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
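/// Illustrative forms (assumed, not exhaustive):
///     dmb ish      dsb sy      dmb oshst
/// Legacy option spellings such as "sh" and "un" are accepted as aliases.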
3250 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3251 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3252   SMLoc S = Parser.getTok().getLoc();
3253   const AsmToken &Tok = Parser.getTok();
3254   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3255   StringRef OptStr = Tok.getString();
3256 
3257   unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3258     .Case("sy",    ARM_MB::SY)
3259     .Case("st",    ARM_MB::ST)
3260     .Case("sh",    ARM_MB::ISH)
3261     .Case("ish",   ARM_MB::ISH)
3262     .Case("shst",  ARM_MB::ISHST)
3263     .Case("ishst", ARM_MB::ISHST)
3264     .Case("nsh",   ARM_MB::NSH)
3265     .Case("un",    ARM_MB::NSH)
3266     .Case("nshst", ARM_MB::NSHST)
3267     .Case("unst",  ARM_MB::NSHST)
3268     .Case("osh",   ARM_MB::OSH)
3269     .Case("oshst", ARM_MB::OSHST)
3270     .Default(~0U);
3271 
3272   if (Opt == ~0U)
3273     return MatchOperand_NoMatch;
3274 
3275   Parser.Lex(); // Eat identifier token.
3276   Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3277   return MatchOperand_Success;
3278 }
3279 
3280 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
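/// Illustrative forms (assumed, not exhaustive): the "if" of "cpsie if" or
/// the "aif" of "cpsid aif"; the string "none" is also accepted and means
/// that no A/I/F bits are set.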
3281 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3282 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3283   SMLoc S = Parser.getTok().getLoc();
3284   const AsmToken &Tok = Parser.getTok();
3285   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3286   StringRef IFlagsStr = Tok.getString();
3287 
3288   // An iflags string of "none" is interpreted to mean that none of the AIF
3289   // bits are set.  Not a terribly useful instruction, but a valid encoding.
3290   unsigned IFlags = 0;
3291   if (IFlagsStr != "none") {
3292     for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3293       unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3294         .Case("a", ARM_PROC::A)
3295         .Case("i", ARM_PROC::I)
3296         .Case("f", ARM_PROC::F)
3297         .Default(~0U);
3298 
3299       // If some specific iflag is already set, it means that some letter is
3300       // present more than once, which is not acceptable.
3301       if (Flag == ~0U || (IFlags & Flag))
3302         return MatchOperand_NoMatch;
3303 
3304       IFlags |= Flag;
3305     }
3306   }
3307 
3308   Parser.Lex(); // Eat identifier token.
3309   Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3310   return MatchOperand_Success;
3311 }
3312 
3313 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
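/// Illustrative forms (assumed, not exhaustive):
///     msr apsr_nzcvq, r0     msr cpsr_fc, r1     msr spsr_fsxc, r2
/// and, on M-class cores, system registers such as:
///     msr primask, r0        msr basepri, r1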
3314 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3315 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3316   SMLoc S = Parser.getTok().getLoc();
3317   const AsmToken &Tok = Parser.getTok();
3318   assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3319   StringRef Mask = Tok.getString();
3320 
3321   if (isMClass()) {
3322     // See ARMv6-M 10.1.1
3323     std::string Name = Mask.lower();
3324     unsigned FlagsVal = StringSwitch<unsigned>(Name)
3325       .Case("apsr", 0)
3326       .Case("iapsr", 1)
3327       .Case("eapsr", 2)
3328       .Case("xpsr", 3)
3329       .Case("ipsr", 5)
3330       .Case("epsr", 6)
3331       .Case("iepsr", 7)
3332       .Case("msp", 8)
3333       .Case("psp", 9)
3334       .Case("primask", 16)
3335       .Case("basepri", 17)
3336       .Case("basepri_max", 18)
3337       .Case("faultmask", 19)
3338       .Case("control", 20)
3339       .Default(~0U);
3340 
3341     if (FlagsVal == ~0U)
3342       return MatchOperand_NoMatch;
3343 
3344     if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3345       // basepri, basepri_max and faultmask are only valid for v7-M.
3346       return MatchOperand_NoMatch;
3347 
3348     Parser.Lex(); // Eat identifier token.
3349     Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3350     return MatchOperand_Success;
3351   }
3352 
3353   // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3354   size_t Start = 0, Next = Mask.find('_');
3355   StringRef Flags = "";
3356   std::string SpecReg = Mask.slice(Start, Next).lower();
3357   if (Next != StringRef::npos)
3358     Flags = Mask.slice(Next+1, Mask.size());
3359 
3360   // FlagsVal contains the complete mask:
3361   // 3-0: Mask
3362   // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3363   unsigned FlagsVal = 0;
3364 
3365   if (SpecReg == "apsr") {
3366     FlagsVal = StringSwitch<unsigned>(Flags)
3367     .Case("nzcvq",  0x8) // same as CPSR_f
3368     .Case("g",      0x4) // same as CPSR_s
3369     .Case("nzcvqg", 0xc) // same as CPSR_fs
3370     .Default(~0U);
3371 
3372     if (FlagsVal == ~0U) {
3373       if (!Flags.empty())
3374         return MatchOperand_NoMatch;
3375       else
3376         FlagsVal = 8; // No flag
3377     }
3378   } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3379     // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3380     if (Flags == "all" || Flags == "")
3381       Flags = "fc";
3382     for (int i = 0, e = Flags.size(); i != e; ++i) {
3383       unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3384       .Case("c", 1)
3385       .Case("x", 2)
3386       .Case("s", 4)
3387       .Case("f", 8)
3388       .Default(~0U);
3389 
3390       // If some specific flag is already set, it means that some letter is
3391       // present more than once, which is not acceptable.
3392       if (Flag == ~0U || (FlagsVal & Flag))
3393         return MatchOperand_NoMatch;
3394       FlagsVal |= Flag;
3395     }
3396   } else // No match for special register.
3397     return MatchOperand_NoMatch;
3398 
3399   // Special register without flags is NOT equivalent to "fc" flags.
3400   // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3401   // two lines would enable gas compatibility at the expense of breaking
3402   // round-tripping.
3403   //
3404   // if (!FlagsVal)
3405   //  FlagsVal = 0x9;
3406 
3407   // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3408   if (SpecReg == "spsr")
3409     FlagsVal |= 16;
3410 
3411   Parser.Lex(); // Eat identifier token.
3412   Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3413   return MatchOperand_Success;
3414 }
3415 
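/// parsePKHImm - Parse the trailing shift-immediate operand of the PKH-style
/// instructions, i.e. an "lsl #n" or "asr #n" suffix, e.g. (illustrative):
///     pkhbt r0, r1, r2, lsl #8
/// Op names the expected shift operator and [Low, High] its allowed range.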
3416 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3417 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3418             int Low, int High) {
3419   const AsmToken &Tok = Parser.getTok();
3420   if (Tok.isNot(AsmToken::Identifier)) {
3421     Error(Parser.getTok().getLoc(), Op + " operand expected.");
3422     return MatchOperand_ParseFail;
3423   }
3424   StringRef ShiftName = Tok.getString();
3425   std::string LowerOp = Op.lower();
3426   std::string UpperOp = Op.upper();
3427   if (ShiftName != LowerOp && ShiftName != UpperOp) {
3428     Error(Parser.getTok().getLoc(), Op + " operand expected.");
3429     return MatchOperand_ParseFail;
3430   }
3431   Parser.Lex(); // Eat shift type token.
3432 
3433   // There must be a '#' and a shift amount.
3434   if (Parser.getTok().isNot(AsmToken::Hash) &&
3435       Parser.getTok().isNot(AsmToken::Dollar)) {
3436     Error(Parser.getTok().getLoc(), "'#' expected");
3437     return MatchOperand_ParseFail;
3438   }
3439   Parser.Lex(); // Eat hash token.
3440 
3441   const MCExpr *ShiftAmount;
3442   SMLoc Loc = Parser.getTok().getLoc();
3443   if (getParser().ParseExpression(ShiftAmount)) {
3444     Error(Loc, "illegal expression");
3445     return MatchOperand_ParseFail;
3446   }
3447   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3448   if (!CE) {
3449     Error(Loc, "constant expression expected");
3450     return MatchOperand_ParseFail;
3451   }
3452   int Val = CE->getValue();
3453   if (Val < Low || Val > High) {
3454     Error(Loc, "immediate value out of range");
3455     return MatchOperand_ParseFail;
3456   }
3457 
3458   Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3459 
3460   return MatchOperand_Success;
3461 }
3462 
3463 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3464 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3465   const AsmToken &Tok = Parser.getTok();
3466   SMLoc S = Tok.getLoc();
3467   if (Tok.isNot(AsmToken::Identifier)) {
3468     Error(Tok.getLoc(), "'be' or 'le' operand expected");
3469     return MatchOperand_ParseFail;
3470   }
3471   int Val = StringSwitch<int>(Tok.getString())
3472     .Case("be", 1)
3473     .Case("le", 0)
3474     .Default(-1);
3475   Parser.Lex(); // Eat the token.
3476 
3477   if (Val == -1) {
3478     Error(Tok.getLoc(), "'be' or 'le' operand expected");
3479     return MatchOperand_ParseFail;
3480   }
3481   Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3482                                                                   getContext()),
3483                                            S, Parser.getTok().getLoc()));
3484   return MatchOperand_Success;
3485 }
3486 
3487 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3488 /// instructions. Legal values are:
3489 ///     lsl #n  'n' in [0,31]
3490 ///     asr #n  'n' in [1,32]
3491 ///             n == 32 encoded as n == 0.
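/// Illustrative uses (assumed, not exhaustive):
///     ssat r4, #8, r5, lsl #4
///     usat r0, #15, r1, asr #16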
3492 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3493 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3494   const AsmToken &Tok = Parser.getTok();
3495   SMLoc S = Tok.getLoc();
3496   if (Tok.isNot(AsmToken::Identifier)) {
3497     Error(S, "shift operator 'asr' or 'lsl' expected");
3498     return MatchOperand_ParseFail;
3499   }
3500   StringRef ShiftName = Tok.getString();
3501   bool isASR;
3502   if (ShiftName == "lsl" || ShiftName == "LSL")
3503     isASR = false;
3504   else if (ShiftName == "asr" || ShiftName == "ASR")
3505     isASR = true;
3506   else {
3507     Error(S, "shift operator 'asr' or 'lsl' expected");
3508     return MatchOperand_ParseFail;
3509   }
3510   Parser.Lex(); // Eat the operator.
3511 
3512   // A '#' and a shift amount.
3513   if (Parser.getTok().isNot(AsmToken::Hash) &&
3514       Parser.getTok().isNot(AsmToken::Dollar)) {
3515     Error(Parser.getTok().getLoc(), "'#' expected");
3516     return MatchOperand_ParseFail;
3517   }
3518   Parser.Lex(); // Eat hash token.
3519 
3520   const MCExpr *ShiftAmount;
3521   SMLoc E = Parser.getTok().getLoc();
3522   if (getParser().ParseExpression(ShiftAmount)) {
3523     Error(E, "malformed shift expression");
3524     return MatchOperand_ParseFail;
3525   }
3526   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3527   if (!CE) {
3528     Error(E, "shift amount must be an immediate");
3529     return MatchOperand_ParseFail;
3530   }
3531 
3532   int64_t Val = CE->getValue();
3533   if (isASR) {
3534     // Shift amount must be in [1,32]
3535     if (Val < 1 || Val > 32) {
3536       Error(E, "'asr' shift amount must be in range [1,32]");
3537       return MatchOperand_ParseFail;
3538     }
3539     // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3540     if (isThumb() && Val == 32) {
3541       Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3542       return MatchOperand_ParseFail;
3543     }
3544     if (Val == 32) Val = 0;
3545   } else {
3546     // Shift amount must be in [0,31]
3547     if (Val < 0 || Val > 31) {
3548       Error(E, "'lsl' shift amount must be in range [0,31]");
3549       return MatchOperand_ParseFail;
3550     }
3551   }
3552 
3553   E = Parser.getTok().getLoc();
3554   Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3555 
3556   return MatchOperand_Success;
3557 }
3558 
3559 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3560 /// of instructions. Legal values are:
3561 ///     ror #n  'n' in {0, 8, 16, 24}
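/// Illustrative uses (assumed, not exhaustive):
///     sxtb r0, r1, ror #8
///     uxtah r2, r3, r4, ror #24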
3562 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3563 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3564   const AsmToken &Tok = Parser.getTok();
3565   SMLoc S = Tok.getLoc();
3566   if (Tok.isNot(AsmToken::Identifier))
3567     return MatchOperand_NoMatch;
3568   StringRef ShiftName = Tok.getString();
3569   if (ShiftName != "ror" && ShiftName != "ROR")
3570     return MatchOperand_NoMatch;
3571   Parser.Lex(); // Eat the operator.
3572 
3573   // A '#' and a rotate amount.
3574   if (Parser.getTok().isNot(AsmToken::Hash) &&
3575       Parser.getTok().isNot(AsmToken::Dollar)) {
3576     Error(Parser.getTok().getLoc(), "'#' expected");
3577     return MatchOperand_ParseFail;
3578   }
3579   Parser.Lex(); // Eat hash token.
3580 
3581   const MCExpr *ShiftAmount;
3582   SMLoc E = Parser.getTok().getLoc();
3583   if (getParser().ParseExpression(ShiftAmount)) {
3584     Error(E, "malformed rotate expression");
3585     return MatchOperand_ParseFail;
3586   }
3587   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3588   if (!CE) {
3589     Error(E, "rotate amount must be an immediate");
3590     return MatchOperand_ParseFail;
3591   }
3592 
3593   int64_t Val = CE->getValue();
3594   // Rotate amount must be in {0, 8, 16, 24}; 0 is accepted as an
3595   // undocumented extension (normally, zero is represented in asm by
3596   // omitting the rotate operand entirely).
3597   if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3598     Error(E, "'ror' rotate amount must be 8, 16, or 24");
3599     return MatchOperand_ParseFail;
3600   }
3601 
3602   E = Parser.getTok().getLoc();
3603   Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3604 
3605   return MatchOperand_Success;
3606 }
3607 
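/// parseBitfield - Parse the "#lsb, #width" operand pair of the bitfield
/// instructions, e.g. (illustrative):
///     bfi r0, r1, #8, #4
///     bfc r2, #0, #16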
3608 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3609 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3610   SMLoc S = Parser.getTok().getLoc();
3611   // The bitfield descriptor is really two operands, the LSB and the width.
3612   if (Parser.getTok().isNot(AsmToken::Hash) &&
3613       Parser.getTok().isNot(AsmToken::Dollar)) {
3614     Error(Parser.getTok().getLoc(), "'#' expected");
3615     return MatchOperand_ParseFail;
3616   }
3617   Parser.Lex(); // Eat hash token.
3618 
3619   const MCExpr *LSBExpr;
3620   SMLoc E = Parser.getTok().getLoc();
3621   if (getParser().ParseExpression(LSBExpr)) {
3622     Error(E, "malformed immediate expression");
3623     return MatchOperand_ParseFail;
3624   }
3625   const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3626   if (!CE) {
3627     Error(E, "'lsb' operand must be an immediate");
3628     return MatchOperand_ParseFail;
3629   }
3630 
3631   int64_t LSB = CE->getValue();
3632   // The LSB must be in the range [0,31]
3633   if (LSB < 0 || LSB > 31) {
3634     Error(E, "'lsb' operand must be in the range [0,31]");
3635     return MatchOperand_ParseFail;
3636   }
3637   E = Parser.getTok().getLoc();
3638 
3639   // Expect another immediate operand.
3640   if (Parser.getTok().isNot(AsmToken::Comma)) {
3641     Error(Parser.getTok().getLoc(), "too few operands");
3642     return MatchOperand_ParseFail;
3643   }
3644   Parser.Lex(); // Eat the comma.
3645   if (Parser.getTok().isNot(AsmToken::Hash) &&
3646       Parser.getTok().isNot(AsmToken::Dollar)) {
3647     Error(Parser.getTok().getLoc(), "'#' expected");
3648     return MatchOperand_ParseFail;
3649   }
3650   Parser.Lex(); // Eat hash token.
3651 
3652   const MCExpr *WidthExpr;
3653   if (getParser().ParseExpression(WidthExpr)) {
3654     Error(E, "malformed immediate expression");
3655     return MatchOperand_ParseFail;
3656   }
3657   CE = dyn_cast<MCConstantExpr>(WidthExpr);
3658   if (!CE) {
3659     Error(E, "'width' operand must be an immediate");
3660     return MatchOperand_ParseFail;
3661   }
3662 
3663   int64_t Width = CE->getValue();
3664   // The width must be in the range [1,32-lsb]
3665   if (Width < 1 || Width > 32 - LSB) {
3666     Error(E, "'width' operand must be in the range [1,32-lsb]");
3667     return MatchOperand_ParseFail;
3668   }
3669   E = Parser.getTok().getLoc();
3670 
3671   Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3672 
3673   return MatchOperand_Success;
3674 }
3675 
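/// parsePostIdxReg - Parse a post-indexed register offset, optionally negated
/// and optionally shifted, e.g. the offset part of (illustrative):
///     ldr r0, [r1], r2
///     str r3, [r4], -r5, lsl #2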
3676 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3677 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3678   // Check for a post-index addressing register operand. Specifically:
3679   // postidx_reg := '+' register {, shift}
3680   //              | '-' register {, shift}
3681   //              | register {, shift}
3682 
3683   // This method must return MatchOperand_NoMatch without consuming any tokens
3684   // in the case where there is no match, as other alternatives are handled by
3685   // other parse methods.
3686   AsmToken Tok = Parser.getTok();
3687   SMLoc S = Tok.getLoc();
3688   bool haveEaten = false;
3689   bool isAdd = true;
3690   int Reg = -1;
3691   if (Tok.is(AsmToken::Plus)) {
3692     Parser.Lex(); // Eat the '+' token.
3693     haveEaten = true;
3694   } else if (Tok.is(AsmToken::Minus)) {
3695     Parser.Lex(); // Eat the '-' token.
3696     isAdd = false;
3697     haveEaten = true;
3698   }
3699   if (Parser.getTok().is(AsmToken::Identifier))
3700     Reg = tryParseRegister();
3701   if (Reg == -1) {
3702     if (!haveEaten)
3703       return MatchOperand_NoMatch;
3704     Error(Parser.getTok().getLoc(), "register expected");
3705     return MatchOperand_ParseFail;
3706   }
3707   SMLoc E = Parser.getTok().getLoc();
3708 
3709   ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3710   unsigned ShiftImm = 0;
3711   if (Parser.getTok().is(AsmToken::Comma)) {
3712     Parser.Lex(); // Eat the ','.
3713     if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3714       return MatchOperand_ParseFail;
3715   }
3716 
3717   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3718                                                   ShiftImm, S, E));
3719 
3720   return MatchOperand_Success;
3721 }
3722 
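/// parseAM3Offset - Parse an addressing-mode-3 style post-index offset, i.e.
/// an immediate or a (possibly negated) register, e.g. the trailing offset of
/// (illustrative):
///     ldrh r0, [r1], #4
///     ldrsb r2, [r3], -r4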
3723 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3724 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3725   // Check for a post-index addressing register operand. Specifically:
3726   // am3offset := '+' register
3727   //              | '-' register
3728   //              | register
3729   //              | # imm
3730   //              | # + imm
3731   //              | # - imm
3732 
3733   // This method must return MatchOperand_NoMatch without consuming any tokens
3734   // in the case where there is no match, as other alternatives are handled by
3735   // other parse methods.
3736   AsmToken Tok = Parser.getTok();
3737   SMLoc S = Tok.getLoc();
3738 
3739   // Do immediates first, as we always parse those if we have a '#'.
3740   if (Parser.getTok().is(AsmToken::Hash) ||
3741       Parser.getTok().is(AsmToken::Dollar)) {
3742     Parser.Lex(); // Eat the '#'.
3743     // Explicitly look for a '-', as we need to encode negative zero
3744     // differently.
3745     bool isNegative = Parser.getTok().is(AsmToken::Minus);
3746     const MCExpr *Offset;
3747     if (getParser().ParseExpression(Offset))
3748       return MatchOperand_ParseFail;
3749     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3750     if (!CE) {
3751       Error(S, "constant expression expected");
3752       return MatchOperand_ParseFail;
3753     }
3754     SMLoc E = Tok.getLoc();
3755     // Negative zero is encoded as the flag value INT32_MIN.
3756     int32_t Val = CE->getValue();
3757     if (isNegative && Val == 0)
3758       Val = INT32_MIN;
3759 
3760     Operands.push_back(
3761       ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3762 
3763     return MatchOperand_Success;
3764   }
3765 
3766 
3767   bool haveEaten = false;
3768   bool isAdd = true;
3769   int Reg = -1;
3770   if (Tok.is(AsmToken::Plus)) {
3771     Parser.Lex(); // Eat the '+' token.
3772     haveEaten = true;
3773   } else if (Tok.is(AsmToken::Minus)) {
3774     Parser.Lex(); // Eat the '-' token.
3775     isAdd = false;
3776     haveEaten = true;
3777   }
3778   if (Parser.getTok().is(AsmToken::Identifier))
3779     Reg = tryParseRegister();
3780   if (Reg == -1) {
3781     if (!haveEaten)
3782       return MatchOperand_NoMatch;
3783     Error(Parser.getTok().getLoc(), "register expected");
3784     return MatchOperand_ParseFail;
3785   }
3786   SMLoc E = Parser.getTok().getLoc();
3787 
3788   Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3789                                                   0, S, E));
3790 
3791   return MatchOperand_Success;
3792 }
3793 
3794 /// cvtT2LdrdPre - Convert parsed operands to MCInst.
3795 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3796 /// when they refer to multiple MIOperands inside a single one.
3797 bool ARMAsmParser::
3798 cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3799              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3800   // Rt, Rt2
3801   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3802   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3803   // Create a writeback register dummy placeholder.
3804   Inst.addOperand(MCOperand::CreateReg(0));
3805   // addr
3806   ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3807   // pred
3808   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3809   return true;
3810 }
3811 
3812 /// cvtT2StrdPre - Convert parsed operands to MCInst.
3813 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3814 /// when they refer to multiple MIOperands inside a single one.
3815 bool ARMAsmParser::
3816 cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3817              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3818   // Create a writeback register dummy placeholder.
3819   Inst.addOperand(MCOperand::CreateReg(0));
3820   // Rt, Rt2
3821   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3822   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3823   // addr
3824   ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3825   // pred
3826   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3827   return true;
3828 }
3829 
3830 /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3831 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3832 /// when they refer to multiple MIOperands inside a single one.
3833 bool ARMAsmParser::
3834 cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3835                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3836   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3837 
3838   // Create a writeback register dummy placeholder.
3839   Inst.addOperand(MCOperand::CreateImm(0));
3840 
3841   ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3842   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3843   return true;
3844 }
3845 
3846 /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3847 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3848 /// when they refer to multiple MIOperands inside a single one.
3849 bool ARMAsmParser::
3850 cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3851                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3852   // Create a writeback register dummy placeholder.
3853   Inst.addOperand(MCOperand::CreateImm(0));
3854   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3855   ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3856   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3857   return true;
3858 }
3859 
3860 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3861 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3862 /// when they refer to multiple MIOperands inside a single one.
3863 bool ARMAsmParser::
3864 cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3865                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3866   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3867 
3868   // Create a writeback register dummy placeholder.
3869   Inst.addOperand(MCOperand::CreateImm(0));
3870 
3871   ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3872   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3873   return true;
3874 }
3875 
3876 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3877 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3878 /// when they refer to multiple MIOperands inside a single one.
3879 bool ARMAsmParser::
3880 cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3881                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3882   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3883 
3884   // Create a writeback register dummy placeholder.
3885   Inst.addOperand(MCOperand::CreateImm(0));
3886 
3887   ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3888   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3889   return true;
3890 }
3891 
3892 
3893 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3894 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3895 /// when they refer to multiple MIOperands inside a single one.
3896 bool ARMAsmParser::
3897 cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3898                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3899   // Create a writeback register dummy placeholder.
3900   Inst.addOperand(MCOperand::CreateImm(0));
3901   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3902   ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3903   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3904   return true;
3905 }
3906 
3907 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3908 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3909 /// when they refer to multiple MIOperands inside a single one.
3910 bool ARMAsmParser::
3911 cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3912                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3913   // Create a writeback register dummy placeholder.
3914   Inst.addOperand(MCOperand::CreateImm(0));
3915   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3916   ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3917   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3918   return true;
3919 }
3920 
3921 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3922 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3923 /// when they refer to multiple MIOperands inside a single one.
3924 bool ARMAsmParser::
3925 cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3926                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3927   // Create a writeback register dummy placeholder.
3928   Inst.addOperand(MCOperand::CreateImm(0));
3929   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3930   ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3931   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3932   return true;
3933 }
3934 
3935 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3936 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3937 /// when they refer to multiple MIOperands inside a single one.
3938 bool ARMAsmParser::
3939 cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3940                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3941   // Rt
3942   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3943   // Create a writeback register dummy placeholder.
3944   Inst.addOperand(MCOperand::CreateImm(0));
3945   // addr
3946   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3947   // offset
3948   ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3949   // pred
3950   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3951   return true;
3952 }
3953 
3954 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3955 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3956 /// when they refer to multiple MIOperands inside a single one.
3957 bool ARMAsmParser::
3958 cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3959                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3960   // Rt
3961   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3962   // Create a writeback register dummy placeholder.
3963   Inst.addOperand(MCOperand::CreateImm(0));
3964   // addr
3965   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3966   // offset
3967   ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3968   // pred
3969   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3970   return true;
3971 }
3972 
3973 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3974 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3975 /// when they refer to multiple MIOperands inside a single one.
3976 bool ARMAsmParser::
3977 cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3978                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3979   // Create a writeback register dummy placeholder.
3980   Inst.addOperand(MCOperand::CreateImm(0));
3981   // Rt
3982   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3983   // addr
3984   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3985   // offset
3986   ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3987   // pred
3988   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3989   return true;
3990 }
3991 
3992 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3993 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3994 /// when they refer to multiple MIOperands inside a single one.
3995 bool ARMAsmParser::
3996 cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3997                       const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3998   // Create a writeback register dummy placeholder.
3999   Inst.addOperand(MCOperand::CreateImm(0));
4000   // Rt
4001   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4002   // addr
4003   ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4004   // offset
4005   ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4006   // pred
4007   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4008   return true;
4009 }
4010 
4011 /// cvtLdrdPre - Convert parsed operands to MCInst.
4012 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4013 /// when they refer to multiple MIOperands inside a single one.
4014 bool ARMAsmParser::
4015 cvtLdrdPre(MCInst &Inst, unsigned Opcode,
4016            const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4017   // Rt, Rt2
4018   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4019   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4020   // Create a writeback register dummy placeholder.
4021   Inst.addOperand(MCOperand::CreateImm(0));
4022   // addr
4023   ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4024   // pred
4025   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4026   return true;
4027 }
4028 
4029 /// cvtStrdPre - Convert parsed operands to MCInst.
4030 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4031 /// when they refer to multiple MIOperands inside a single one.
4032 bool ARMAsmParser::
4033 cvtStrdPre(MCInst &Inst, unsigned Opcode,
4034            const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4035   // Create a writeback register dummy placeholder.
4036   Inst.addOperand(MCOperand::CreateImm(0));
4037   // Rt, Rt2
4038   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4039   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4040   // addr
4041   ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4042   // pred
4043   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4044   return true;
4045 }
4046 
4047 /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4048 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4049 /// when they refer to multiple MIOperands inside a single one.
4050 bool ARMAsmParser::
4051 cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4052                          const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4053   ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4054   // Create a writeback register dummy placeholder.
4055   Inst.addOperand(MCOperand::CreateImm(0));
4056   ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4057   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4058   return true;
4059 }
4060 
4061 /// cvtThumbMultiply - Convert parsed operands to MCInst.
4062 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4063 /// when they refer to multiple MIOperands inside a single one.
4064 bool ARMAsmParser::
4065 cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4066            const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4067   // The second source operand must be the same register as the destination
4068   // operand.
4069   if (Operands.size() == 6 &&
4070       (((ARMOperand*)Operands[3])->getReg() !=
4071        ((ARMOperand*)Operands[5])->getReg()) &&
4072       (((ARMOperand*)Operands[3])->getReg() !=
4073        ((ARMOperand*)Operands[4])->getReg())) {
4074     Error(Operands[3]->getStartLoc(),
4075           "destination register must match source register");
4076     return false;
4077   }
4078   ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4079   ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4080   // If we have a three-operand form, make sure to set Rn to be the operand
4081   // that isn't the same as Rd.
4082   unsigned RegOp = 4;
4083   if (Operands.size() == 6 &&
4084       ((ARMOperand*)Operands[4])->getReg() ==
4085         ((ARMOperand*)Operands[3])->getReg())
4086     RegOp = 5;
4087   ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4088   Inst.addOperand(Inst.getOperand(0));
4089   ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4090 
4091   return true;
4092 }
4093 
4094 bool ARMAsmParser::
4095 cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4096               const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4097   // Vd
4098   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4099   // Create a writeback register dummy placeholder.
4100   Inst.addOperand(MCOperand::CreateImm(0));
4101   // Vn
4102   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4103   // pred
4104   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4105   return true;
4106 }
4107 
4108 bool ARMAsmParser::
4109 cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4110                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4111   // Vd
4112   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4113   // Create a writeback register dummy placeholder.
4114   Inst.addOperand(MCOperand::CreateImm(0));
4115   // Vn
4116   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4117   // Vm
4118   ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4119   // pred
4120   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4121   return true;
4122 }
4123 
4124 bool ARMAsmParser::
4125 cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4126               const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4127   // Create a writeback register dummy placeholder.
4128   Inst.addOperand(MCOperand::CreateImm(0));
4129   // Vn
4130   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4131   // Vt
4132   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4133   // pred
4134   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4135   return true;
4136 }
4137 
4138 bool ARMAsmParser::
4139 cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4140                  const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4141   // Create a writeback register dummy placeholder.
4142   Inst.addOperand(MCOperand::CreateImm(0));
4143   // Vn
4144   ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4145   // Vm
4146   ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4147   // Vt
4148   ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4149   // pred
4150   ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4151   return true;
4152 }
4153 
4154 /// Parse an ARM memory expression; return false on success and true on error.
4155 /// The first token must be a '[' when this is called.
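/// Illustrative operand forms (assumed, not exhaustive):
///     [r0]    [r0, #-8]    [r0, r1, lsl #2]    [r0, :128]
/// each optionally followed by a pre-index writeback marker '!'.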
4156 bool ARMAsmParser::
4157 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4158   SMLoc S, E;
4159   assert(Parser.getTok().is(AsmToken::LBrac) &&
4160          "Token is not a Left Bracket");
4161   S = Parser.getTok().getLoc();
4162   Parser.Lex(); // Eat left bracket token.
4163 
4164   const AsmToken &BaseRegTok = Parser.getTok();
4165   int BaseRegNum = tryParseRegister();
4166   if (BaseRegNum == -1)
4167     return Error(BaseRegTok.getLoc(), "register expected");
4168 
4169   // The next token must either be a comma or a closing bracket.
4170   const AsmToken &Tok = Parser.getTok();
4171   if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4172     return Error(Tok.getLoc(), "malformed memory operand");
4173 
4174   if (Tok.is(AsmToken::RBrac)) {
4175     E = Tok.getLoc();
4176     Parser.Lex(); // Eat right bracket token.
4177 
4178     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4179                                              0, 0, false, S, E));
4180 
4181     // If there's a pre-indexing writeback marker, '!', just add it as a token
4182     // operand. It's rather odd, but syntactically valid.
4183     if (Parser.getTok().is(AsmToken::Exclaim)) {
4184       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4185       Parser.Lex(); // Eat the '!'.
4186     }
4187 
4188     return false;
4189   }
4190 
4191   assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4192   Parser.Lex(); // Eat the comma.
4193 
4194   // If we have a ':', it's an alignment specifier.
4195   if (Parser.getTok().is(AsmToken::Colon)) {
4196     Parser.Lex(); // Eat the ':'.
4197     E = Parser.getTok().getLoc();
4198 
4199     const MCExpr *Expr;
4200     if (getParser().ParseExpression(Expr))
4201      return true;
4202 
4203     // The expression has to be a constant. Memory references with relocations
4204     // don't come through here, as they use the <label> forms of the relevant
4205     // instructions.
4206     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4207     if (!CE)
4208       return Error (E, "constant expression expected");
4209 
4210     unsigned Align = 0;
4211     switch (CE->getValue()) {
4212     default:
4213       return Error(E,
4214                    "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4215     case 16:  Align = 2; break;
4216     case 32:  Align = 4; break;
4217     case 64:  Align = 8; break;
4218     case 128: Align = 16; break;
4219     case 256: Align = 32; break;
4220     }
4221 
4222     // Now we should have the closing ']'
4223     E = Parser.getTok().getLoc();
4224     if (Parser.getTok().isNot(AsmToken::RBrac))
4225       return Error(E, "']' expected");
4226     Parser.Lex(); // Eat right bracket token.
4227 
4228     // Don't worry about range checking the value here. That's handled by
4229     // the is*() predicates.
4230     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4231                                              ARM_AM::no_shift, 0, Align,
4232                                              false, S, E));
4233 
4234     // If there's a pre-indexing writeback marker, '!', just add it as a token
4235     // operand.
4236     if (Parser.getTok().is(AsmToken::Exclaim)) {
4237       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4238       Parser.Lex(); // Eat the '!'.
4239     }
4240 
4241     return false;
4242   }
4243 
4244   // If we have a '#', it's an immediate offset, else assume it's a register
4245   // offset. Be friendly and also accept a plain integer (without a leading
4246   // hash) for gas compatibility.
4247   if (Parser.getTok().is(AsmToken::Hash) ||
4248       Parser.getTok().is(AsmToken::Dollar) ||
4249       Parser.getTok().is(AsmToken::Integer)) {
4250     if (Parser.getTok().isNot(AsmToken::Integer))
4251       Parser.Lex(); // Eat the '#'.
4252     E = Parser.getTok().getLoc();
4253 
4254     bool isNegative = getParser().getTok().is(AsmToken::Minus);
4255     const MCExpr *Offset;
4256     if (getParser().ParseExpression(Offset))
4257      return true;
4258 
4259     // The expression has to be a constant. Memory references with relocations
4260     // don't come through here, as they use the <label> forms of the relevant
4261     // instructions.
4262     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4263     if (!CE)
4264       return Error (E, "constant expression expected");
4265 
4266     // If the constant was #-0, represent it as INT32_MIN.
4267     int32_t Val = CE->getValue();
4268     if (isNegative && Val == 0)
4269       CE = MCConstantExpr::Create(INT32_MIN, getContext());
4270 
4271     // Now we should have the closing ']'
4272     E = Parser.getTok().getLoc();
4273     if (Parser.getTok().isNot(AsmToken::RBrac))
4274       return Error(E, "']' expected");
4275     Parser.Lex(); // Eat right bracket token.
4276 
4277     // Don't worry about range checking the value here. That's handled by
4278     // the is*() predicates.
4279     Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4280                                              ARM_AM::no_shift, 0, 0,
4281                                              false, S, E));
4282 
4283     // If there's a pre-indexing writeback marker, '!', just add it as a token
4284     // operand.
4285     if (Parser.getTok().is(AsmToken::Exclaim)) {
4286       Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4287       Parser.Lex(); // Eat the '!'.
4288     }
4289 
4290     return false;
4291   }
4292 
4293   // The register offset is optionally preceded by a '+' or '-'
4294   bool isNegative = false;
4295   if (Parser.getTok().is(AsmToken::Minus)) {
4296     isNegative = true;
4297     Parser.Lex(); // Eat the '-'.
4298   } else if (Parser.getTok().is(AsmToken::Plus)) {
4299     // Nothing to do.
4300     Parser.Lex(); // Eat the '+'.
4301   }
4302 
4303   E = Parser.getTok().getLoc();
4304   int OffsetRegNum = tryParseRegister();
4305   if (OffsetRegNum == -1)
4306     return Error(E, "register expected");
4307 
4308   // If there's a shift operator, handle it.
4309   ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4310   unsigned ShiftImm = 0;
4311   if (Parser.getTok().is(AsmToken::Comma)) {
4312     Parser.Lex(); // Eat the ','.
4313     if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4314       return true;
4315   }
4316 
4317   // Now we should have the closing ']'
4318   E = Parser.getTok().getLoc();
4319   if (Parser.getTok().isNot(AsmToken::RBrac))
4320     return Error(E, "']' expected");
4321   Parser.Lex(); // Eat right bracket token.
4322 
4323   Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4324                                            ShiftType, ShiftImm, 0, isNegative,
4325                                            S, E));
4326 
4327   // If there's a pre-indexing writeback marker, '!', just add it as a token
4328   // operand.
4329   if (Parser.getTok().is(AsmToken::Exclaim)) {
4330     Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4331     Parser.Lex(); // Eat the '!'.
4332   }
4333 
4334   return false;
4335 }
4336 
4337 /// parseMemRegOffsetShift - one of these two:
4338 ///   ( lsl | lsr | asr | ror ) , # shift_amount
4339 ///   rrx
4340 /// Returns false if a shift was successfully parsed, true otherwise.
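/// For example, the 'lsl #2' in 'ldr r0, [r1, r2, lsl #2]' is parsed here, as
/// is a bare 'rrx', which takes no shift amount.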
4341 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4342                                           unsigned &Amount) {
4343   SMLoc Loc = Parser.getTok().getLoc();
4344   const AsmToken &Tok = Parser.getTok();
4345   if (Tok.isNot(AsmToken::Identifier))
4346     return true;
4347   StringRef ShiftName = Tok.getString();
4348   if (ShiftName == "lsl" || ShiftName == "LSL" ||
4349       ShiftName == "asl" || ShiftName == "ASL")
4350     St = ARM_AM::lsl;
4351   else if (ShiftName == "lsr" || ShiftName == "LSR")
4352     St = ARM_AM::lsr;
4353   else if (ShiftName == "asr" || ShiftName == "ASR")
4354     St = ARM_AM::asr;
4355   else if (ShiftName == "ror" || ShiftName == "ROR")
4356     St = ARM_AM::ror;
4357   else if (ShiftName == "rrx" || ShiftName == "RRX")
4358     St = ARM_AM::rrx;
4359   else
4360     return Error(Loc, "illegal shift operator");
4361   Parser.Lex(); // Eat shift type token.
4362 
4363   // rrx stands alone.
4364   Amount = 0;
4365   if (St != ARM_AM::rrx) {
4366     Loc = Parser.getTok().getLoc();
4367     // A '#' and a shift amount.
4368     const AsmToken &HashTok = Parser.getTok();
4369     if (HashTok.isNot(AsmToken::Hash) &&
4370         HashTok.isNot(AsmToken::Dollar))
4371       return Error(HashTok.getLoc(), "'#' expected");
4372     Parser.Lex(); // Eat hash token.
4373 
4374     const MCExpr *Expr;
4375     if (getParser().ParseExpression(Expr))
4376       return true;
4377     // Range check the immediate.
4378     // lsl, ror: 0 <= imm <= 31
4379     // lsr, asr: 0 <= imm <= 32
4380     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4381     if (!CE)
4382       return Error(Loc, "shift amount must be an immediate");
4383     int64_t Imm = CE->getValue();
4384     if (Imm < 0 ||
4385         ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4386         ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4387       return Error(Loc, "immediate shift value out of range");
4388     Amount = Imm;
4389   }
4390 
4391   return false;
4392 }
4393 
4394 /// parseFPImm - A floating point immediate expression operand.
4395 ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4396 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4397   // Anything that can accept a floating point constant as an operand
4398   // needs to go through here, as the regular ParseExpression is
4399   // integer only.
4400   //
4401   // This routine still creates a generic Immediate operand, containing
4402   // a bitcast of the 64-bit floating point value. The various operands
4403   // that accept floats can check whether the value is valid for them
4404   // via the standard is*() predicates.
4405 
4406   SMLoc S = Parser.getTok().getLoc();
4407 
4408   if (Parser.getTok().isNot(AsmToken::Hash) &&
4409       Parser.getTok().isNot(AsmToken::Dollar))
4410     return MatchOperand_NoMatch;
4411 
4412   // Disambiguate the VMOV forms that can accept an FP immediate.
4413   // vmov.f32 <sreg>, #imm
4414   // vmov.f64 <dreg>, #imm
4415   // vmov.f32 <dreg>, #imm  @ vector f32x2
4416   // vmov.f32 <qreg>, #imm  @ vector f32x4
4417   //
4418   // There are also the NEON VMOV instructions which expect an
4419   // integer constant. Make sure we don't try to parse an FPImm
4420   // for these:
4421   // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4422   ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4423   if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4424                            TyOp->getToken() != ".f64"))
4425     return MatchOperand_NoMatch;
4426 
4427   Parser.Lex(); // Eat the '#'.
4428 
4429   // Handle negation, as that still comes through as a separate token.
4430   bool isNegative = false;
4431   if (Parser.getTok().is(AsmToken::Minus)) {
4432     isNegative = true;
4433     Parser.Lex();
4434   }
4435   const AsmToken &Tok = Parser.getTok();
4436   SMLoc Loc = Tok.getLoc();
4437   if (Tok.is(AsmToken::Real)) {
4438     APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4439     uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4440     // If we had a '-' in front, toggle the sign bit.
4441     IntVal ^= (uint64_t)isNegative << 31;
4442     Parser.Lex(); // Eat the token.
4443     Operands.push_back(ARMOperand::CreateImm(
4444           MCConstantExpr::Create(IntVal, getContext()),
4445           S, Parser.getTok().getLoc()));
4446     return MatchOperand_Success;
4447   }
4448   // Also handle plain integers. Instructions which allow floating point
4449   // immediates also allow a raw encoded 8-bit value.
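  // For instance (illustrative), 'vmov.f32 s0, #112' supplies the raw 8-bit
  // encoding 0x70, which ARM_AM::getFPImmFloat() decodes to 1.0.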
4450   if (Tok.is(AsmToken::Integer)) {
4451     int64_t Val = Tok.getIntVal();
4452     Parser.Lex(); // Eat the token.
4453     if (Val > 255 || Val < 0) {
4454       Error(Loc, "encoded floating point value out of range");
4455       return MatchOperand_ParseFail;
4456     }
4457     double RealVal = ARM_AM::getFPImmFloat(Val);
4458     Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4459     Operands.push_back(ARMOperand::CreateImm(
4460         MCConstantExpr::Create(Val, getContext()), S,
4461         Parser.getTok().getLoc()));
4462     return MatchOperand_Success;
4463   }
4464 
4465   Error(Loc, "invalid floating point immediate");
4466   return MatchOperand_ParseFail;
4467 }
4468 
4469 /// Parse an ARM instruction operand.  For now this parses the operand regardless
4470 /// of the mnemonic.
4471 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4472                                 StringRef Mnemonic) {
4473   SMLoc S, E;
4474 
4475   // Check if the current operand has a custom associated parser, if so, try to
4476   // custom parse the operand, or fallback to the general approach.
4477   OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4478   if (ResTy == MatchOperand_Success)
4479     return false;
4480   // If there wasn't a custom match, try the generic matcher below. Otherwise,
4481   // there was a match, but an error occurred, in which case, just return that
4482   // the operand parsing failed.
4483   if (ResTy == MatchOperand_ParseFail)
4484     return true;
4485 
4486   switch (getLexer().getKind()) {
4487   default:
4488     Error(Parser.getTok().getLoc(), "unexpected token in operand");
4489     return true;
4490   case AsmToken::Identifier: {
4491     if (!tryParseRegisterWithWriteBack(Operands))
4492       return false;
4493     int Res = tryParseShiftRegister(Operands);
4494     if (Res == 0) // success
4495       return false;
4496     else if (Res == -1) // irrecoverable error
4497       return true;
4498     // If this is VMRS, check for the apsr_nzcv operand.
4499     if (Mnemonic == "vmrs" &&
4500         Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4501       S = Parser.getTok().getLoc();
4502       Parser.Lex();
4503       Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4504       return false;
4505     }
4506 
4507     // Fall through for the Identifier case that is not a register or a
4508     // special name.
4509   }
4510   case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4511   case AsmToken::Integer: // things like 1f and 2b as a branch targets
4512   case AsmToken::String:  // quoted label names.
4513   case AsmToken::Dot: {   // . as a branch target
4514     // This was not a register so parse other operands that start with an
4515     // identifier (like labels) as expressions and create them as immediates.
4516     const MCExpr *IdVal;
4517     S = Parser.getTok().getLoc();
4518     if (getParser().ParseExpression(IdVal))
4519       return true;
4520     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4521     Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4522     return false;
4523   }
4524   case AsmToken::LBrac:
4525     return parseMemory(Operands);
4526   case AsmToken::LCurly:
4527     return parseRegisterList(Operands);
4528   case AsmToken::Dollar:
4529   case AsmToken::Hash: {
4530     // #42 -> immediate.
4531     S = Parser.getTok().getLoc();
4532     Parser.Lex();
4533 
4534     if (Parser.getTok().isNot(AsmToken::Colon)) {
4535       bool isNegative = Parser.getTok().is(AsmToken::Minus);
4536       const MCExpr *ImmVal;
4537       if (getParser().ParseExpression(ImmVal))
4538         return true;
4539       const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4540       if (CE) {
4541         int32_t Val = CE->getValue();
4542         if (isNegative && Val == 0)
4543           ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4544       }
4545       E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4546       Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4547       return false;
4548     }
4549     // w/ a ':' after the '#', it's just like a plain ':'.
4550     // FALLTHROUGH
4551   }
4552   case AsmToken::Colon: {
4553     // ":lower16:" and ":upper16:" expression prefixes
4554     // FIXME: Check it's an expression prefix,
4555     // e.g. (FOO - :lower16:BAR) isn't legal.
4556     ARMMCExpr::VariantKind RefKind;
4557     if (parsePrefix(RefKind))
4558       return true;
4559 
4560     const MCExpr *SubExprVal;
4561     if (getParser().ParseExpression(SubExprVal))
4562       return true;
4563 
4564     const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4565                                                    getContext());
4566     E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4567     Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4568     return false;
4569   }
4570   }
4571 }
4572 
4573 // parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4574 //  :lower16: and :upper16:.
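// For example, in 'movw r0, :lower16:some_symbol' the ':lower16:' prefix is
// parsed here and attached to the expression that follows it.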
4575 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4576   RefKind = ARMMCExpr::VK_ARM_None;
4577 
4578   // :lower16: and :upper16: modifiers
4579   assert(getLexer().is(AsmToken::Colon) && "expected a :");
4580   Parser.Lex(); // Eat ':'
4581 
4582   if (getLexer().isNot(AsmToken::Identifier)) {
4583     Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4584     return true;
4585   }
4586 
4587   StringRef IDVal = Parser.getTok().getIdentifier();
4588   if (IDVal == "lower16") {
4589     RefKind = ARMMCExpr::VK_ARM_LO16;
4590   } else if (IDVal == "upper16") {
4591     RefKind = ARMMCExpr::VK_ARM_HI16;
4592   } else {
4593     Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4594     return true;
4595   }
4596   Parser.Lex();
4597 
4598   if (getLexer().isNot(AsmToken::Colon)) {
4599     Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4600     return true;
4601   }
4602   Parser.Lex(); // Eat the last ':'
4603   return false;
4604 }
4605 
4606 /// \brief Given a mnemonic, split out possible predication code and carry
4607 /// setting letters to form a canonical mnemonic and flags.
4608 //
4609 // FIXME: Would be nice to autogen this.
4610 // FIXME: This is a bit of a maze of special cases.
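//
// For example (illustrative), "addseq" splits into the mnemonic "add" with a
// carry-setting 's' and an EQ predication code, and "cpsie" splits into "cps"
// with the IE interrupt-mode (imod) operand.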
4611 StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4612                                       unsigned &PredicationCode,
4613                                       bool &CarrySetting,
4614                                       unsigned &ProcessorIMod,
4615                                       StringRef &ITMask) {
4616   PredicationCode = ARMCC::AL;
4617   CarrySetting = false;
4618   ProcessorIMod = 0;
4619 
4620   // Ignore some mnemonics we know aren't predicated forms.
4621   //
4622   // FIXME: Would be nice to autogen this.
4623   if ((Mnemonic == "movs" && isThumb()) ||
4624       Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4625       Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4626       Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4627       Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4628       Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4629       Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4630       Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4631       Mnemonic == "fmuls")
4632     return Mnemonic;
4633 
4634   // First, split out any predication code. Ignore mnemonics we know aren't
4635   // predicated but do have a carry-setting 's' and so weren't caught above.
4636   if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4637       Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4638       Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4639       Mnemonic != "sbcs" && Mnemonic != "rscs") {
4640     unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4641       .Case("eq", ARMCC::EQ)
4642       .Case("ne", ARMCC::NE)
4643       .Case("hs", ARMCC::HS)
4644       .Case("cs", ARMCC::HS)
4645       .Case("lo", ARMCC::LO)
4646       .Case("cc", ARMCC::LO)
4647       .Case("mi", ARMCC::MI)
4648       .Case("pl", ARMCC::PL)
4649       .Case("vs", ARMCC::VS)
4650       .Case("vc", ARMCC::VC)
4651       .Case("hi", ARMCC::HI)
4652       .Case("ls", ARMCC::LS)
4653       .Case("ge", ARMCC::GE)
4654       .Case("lt", ARMCC::LT)
4655       .Case("gt", ARMCC::GT)
4656       .Case("le", ARMCC::LE)
4657       .Case("al", ARMCC::AL)
4658       .Default(~0U);
4659     if (CC != ~0U) {
4660       Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4661       PredicationCode = CC;
4662     }
4663   }
4664 
4665   // Next, determine if we have a carry setting bit. We explicitly ignore all
4666   // the instructions we know end in 's'.
4667   if (Mnemonic.endswith("s") &&
4668       !(Mnemonic == "cps" || Mnemonic == "mls" ||
4669         Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4670         Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4671         Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4672         Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4673         Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4674         Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4675         Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4676         Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4677         (Mnemonic == "movs" && isThumb()))) {
4678     Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4679     CarrySetting = true;
4680   }
4681 
4682   // The "cps" instruction can have a interrupt mode operand which is glued into
4683   // the mnemonic. Check if this is the case, split it and parse the imod op
4684   if (Mnemonic.startswith("cps")) {
4685     // Split out any imod code.
4686     unsigned IMod =
4687       StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4688       .Case("ie", ARM_PROC::IE)
4689       .Case("id", ARM_PROC::ID)
4690       .Default(~0U);
4691     if (IMod != ~0U) {
4692       Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4693       ProcessorIMod = IMod;
4694     }
4695   }
4696 
4697   // The "it" instruction has the condition mask on the end of the mnemonic.
4698   if (Mnemonic.startswith("it")) {
4699     ITMask = Mnemonic.slice(2, Mnemonic.size());
4700     Mnemonic = Mnemonic.slice(0, 2);
4701   }
4702 
4703   return Mnemonic;
4704 }
4705 
4706 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
4707 /// inclusion of carry set or predication code operands.
4708 //
4709 // FIXME: It would be nice to autogen this.
4710 void ARMAsmParser::
4711 getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4712                       bool &CanAcceptPredicationCode) {
4713   if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4714       Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4715       Mnemonic == "add" || Mnemonic == "adc" ||
4716       Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4717       Mnemonic == "orr" || Mnemonic == "mvn" ||
4718       Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4719       Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4720       Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4721       (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4722                       Mnemonic == "mla" || Mnemonic == "smlal" ||
4723                       Mnemonic == "umlal" || Mnemonic == "umull"))) {
4724     CanAcceptCarrySet = true;
4725   } else
4726     CanAcceptCarrySet = false;
4727 
4728   if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4729       Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4730       Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4731       Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4732       Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4733       (Mnemonic == "clrex" && !isThumb()) ||
4734       (Mnemonic == "nop" && isThumbOne()) ||
4735       ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4736         Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4737         Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4738       ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4739        !isThumb()) ||
4740       Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4741     CanAcceptPredicationCode = false;
4742   } else
4743     CanAcceptPredicationCode = true;
4744 
4745   if (isThumb()) {
4746     if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4747         Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4748       CanAcceptPredicationCode = false;
4749   }
4750 }
4751 
4752 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4753                                SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4754   // FIXME: This is all horribly hacky. We really need a better way to deal
4755   // with optional operands like this in the matcher table.
4756 
4757   // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4758   // another does not. Specifically, the MOVW instruction does not. So we
4759   // special case it here and remove the defaulted (non-setting) cc_out
4760   // operand if that's the instruction we're trying to match.
4761   //
4762   // We do this as post-processing of the explicit operands rather than just
4763   // conditionally adding the cc_out in the first place because we need
4764   // to check the type of the parsed immediate operand.
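  // For example (illustrative), in ARM mode 'mov r0, #0x1234' can only be
  // MOVW, since 0x1234 is not a valid so_imm, so the defaulted cc_out operand
  // is removed.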
4765   if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4766       !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4767       static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4768       static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4769     return true;
4770 
4771   // Register-register 'add' for thumb does not have a cc_out operand
4772   // when there are only two register operands.
4773   if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4774       static_cast<ARMOperand*>(Operands[3])->isReg() &&
4775       static_cast<ARMOperand*>(Operands[4])->isReg() &&
4776       static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4777     return true;
4778   // Register-register 'add' for thumb does not have a cc_out operand
4779   // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4780   // have to check the immediate range here since Thumb2 has a variant
4781   // that can handle a different range and has a cc_out operand.
4782   if (((isThumb() && Mnemonic == "add") ||
4783        (isThumbTwo() && Mnemonic == "sub")) &&
4784       Operands.size() == 6 &&
4785       static_cast<ARMOperand*>(Operands[3])->isReg() &&
4786       static_cast<ARMOperand*>(Operands[4])->isReg() &&
4787       static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4788       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4789       ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4790        static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4791     return true;
4792   // For Thumb2, add/sub immediate does not have a cc_out operand for the
4793   // imm0_4095 variant. That's the least-preferred variant when
4794   // selecting via the generic "add" mnemonic, so to know that we
4795   // should remove the cc_out operand, we have to explicitly check that
4796   // it's not one of the other variants. Ugh.
4797   if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4798       Operands.size() == 6 &&
4799       static_cast<ARMOperand*>(Operands[3])->isReg() &&
4800       static_cast<ARMOperand*>(Operands[4])->isReg() &&
4801       static_cast<ARMOperand*>(Operands[5])->isImm()) {
4802     // Nest conditions rather than one big 'if' statement for readability.
4803     //
4804     // If either register is a high reg, it's either one of the SP
4805     // variants (handled above) or a 32-bit encoding, so we just
4806     // check against T3. If the second register is the PC, this is an
4807     // alternate form of ADR, which uses encoding T4, so check for that too.
4808     if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4809          !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4810         static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4811         static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4812       return false;
4813     // If both registers are low, we're in an IT block, and the immediate is
4814     // in range, we should use encoding T1 instead, which has a cc_out.
4815     if (inITBlock() &&
4816         isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4817         isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4818         static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4819       return false;
4820 
4821     // Otherwise, we use encoding T4, which does not have a cc_out
4822     // operand.
4823     return true;
4824   }
4825 
4826   // The thumb2 multiply instruction doesn't have a CCOut register, so
4827   // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4828   // use the 16-bit encoding or not.
4829   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4830       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4831       static_cast<ARMOperand*>(Operands[3])->isReg() &&
4832       static_cast<ARMOperand*>(Operands[4])->isReg() &&
4833       static_cast<ARMOperand*>(Operands[5])->isReg() &&
4834       // If the registers aren't low regs, the destination reg isn't the
4835       // same as one of the source regs, or the cc_out operand is zero
4836       // outside of an IT block, we have to use the 32-bit encoding, so
4837       // remove the cc_out operand.
4838       (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4839        !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4840        !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4841        !inITBlock() ||
4842        (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4843         static_cast<ARMOperand*>(Operands[5])->getReg() &&
4844         static_cast<ARMOperand*>(Operands[3])->getReg() !=
4845         static_cast<ARMOperand*>(Operands[4])->getReg())))
4846     return true;
4847 
4848   // Also check the 'mul' syntax variant that doesn't specify an explicit
4849   // destination register.
4850   if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4851       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4852       static_cast<ARMOperand*>(Operands[3])->isReg() &&
4853       static_cast<ARMOperand*>(Operands[4])->isReg() &&
4854       // If the registers aren't low regs or the cc_out operand is zero
4855       // outside of an IT block, we have to use the 32-bit encoding, so
4856       // remove the cc_out operand.
4857       (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4858        !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4859        !inITBlock()))
4860     return true;
4861
4864   // Register-register 'add/sub' for thumb does not have a cc_out operand
4865   // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4866   // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4867   // right, this will result in better diagnostics (which operand is off)
4868   // anyway.
4869   if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4870       (Operands.size() == 5 || Operands.size() == 6) &&
4871       static_cast<ARMOperand*>(Operands[3])->isReg() &&
4872       static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4873       static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4874       (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4875        (Operands.size() == 6 &&
4876         static_cast<ARMOperand*>(Operands[5])->isImm())))
4877     return true;
4878 
4879   return false;
4880 }
4881 
4882 static bool isDataTypeToken(StringRef Tok) {
4883   return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4884     Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4885     Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4886     Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4887     Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4888     Tok == ".f" || Tok == ".d";
4889 }
4890 
4891 // FIXME: This bit should probably be handled via an explicit match class
4892 // in the .td files that matches the suffix instead of having it be
4893 // a literal string token the way it is now.
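//
// For example (illustrative), 'vldm.32 r0!, {s0-s3}' is handled exactly like
// 'vldm r0!, {s0-s3}'; the '.32' suffix is dropped rather than added as an
// operand token.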
4894 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4895   return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4896 }
4897 
4898 static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4899 /// Parse an ARM instruction mnemonic followed by its operands.
4900 bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4901                                SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4902   // Apply mnemonic aliases before doing anything else, as the destination
4903   // mnemonic may include suffixes and we want to handle them normally.
4904   // The generic tblgen'erated code does this later, at the start of
4905   // MatchInstructionImpl(), but that's too late for aliases that include
4906   // any sort of suffix.
4907   unsigned AvailableFeatures = getAvailableFeatures();
4908   applyMnemonicAliases(Name, AvailableFeatures);
4909 
4910   // First check for the ARM-specific .req directive.
4911   if (Parser.getTok().is(AsmToken::Identifier) &&
4912       Parser.getTok().getIdentifier() == ".req") {
4913     parseDirectiveReq(Name, NameLoc);
4914     // We always return 'error' for this, as we're done with this
4915     // statement and don't need to match the instruction.
4916     return true;
4917   }
4918 
4919   // Create the leading tokens for the mnemonic, split by '.' characters.
4920   size_t Start = 0, Next = Name.find('.');
4921   StringRef Mnemonic = Name.slice(Start, Next);
4922 
4923   // Split out the predication code and carry setting flag from the mnemonic.
4924   unsigned PredicationCode;
4925   unsigned ProcessorIMod;
4926   bool CarrySetting;
4927   StringRef ITMask;
4928   Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4929                            ProcessorIMod, ITMask);
4930 
4931   // In Thumb1, only the branch (B) instruction can be predicated.
4932   if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4933     Parser.EatToEndOfStatement();
4934     return Error(NameLoc, "conditional execution not supported in Thumb1");
4935   }
4936 
4937   Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4938 
4939   // Handle the IT instruction ITMask. Convert it to a bitmask. This
4940   // is the mask as it will be for the IT encoding if the conditional
4941   // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4942   // where the conditional bit0 is zero, the instruction post-processing
4943   // will adjust the mask accordingly.
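  // For example (illustrative), "itte" produces an as-written mask of 0b1010
  // here: one bit per suffix letter ('t' == 1, 'e' == 0), followed by the
  // terminating '1' that marks the length of the block.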
4944   if (Mnemonic == "it") {
4945     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4946     if (ITMask.size() > 3) {
4947       Parser.EatToEndOfStatement();
4948       return Error(Loc, "too many conditions on IT instruction");
4949     }
4950     unsigned Mask = 8;
4951     for (unsigned i = ITMask.size(); i != 0; --i) {
4952       char pos = ITMask[i - 1];
4953       if (pos != 't' && pos != 'e') {
4954         Parser.EatToEndOfStatement();
4955         return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4956       }
4957       Mask >>= 1;
4958       if (ITMask[i - 1] == 't')
4959         Mask |= 8;
4960     }
4961     Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4962   }
4963 
4964   // FIXME: This is all a pretty gross hack. We should automatically handle
4965   // optional operands like this via tblgen.
4966 
4967   // Next, add the CCOut and ConditionCode operands, if needed.
4968   //
4969   // For mnemonics which can ever incorporate a carry setting bit or predication
4970   // code, our matching model involves us always generating CCOut and
4971   // ConditionCode operands to match the mnemonic "as written" and then we let
4972   // the matcher deal with finding the right instruction or generating an
4973   // appropriate error.
4974   bool CanAcceptCarrySet, CanAcceptPredicationCode;
4975   getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4976 
4977   // If we had a carry-set on an instruction that can't do that, issue an
4978   // error.
4979   if (!CanAcceptCarrySet && CarrySetting) {
4980     Parser.EatToEndOfStatement();
4981     return Error(NameLoc, "instruction '" + Mnemonic +
4982                  "' can not set flags, but 's' suffix specified");
4983   }
4984   // If we had a predication code on an instruction that can't do that, issue an
4985   // error.
4986   if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4987     Parser.EatToEndOfStatement();
4988     return Error(NameLoc, "instruction '" + Mnemonic +
4989                  "' is not predicable, but condition code specified");
4990   }
4991 
4992   // Add the carry setting operand, if necessary.
4993   if (CanAcceptCarrySet) {
4994     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4995     Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4996                                                Loc));
4997   }
4998 
4999   // Add the predication code operand, if necessary.
5000   if (CanAcceptPredicationCode) {
5001     SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5002                                       CarrySetting);
5003     Operands.push_back(ARMOperand::CreateCondCode(
5004                          ARMCC::CondCodes(PredicationCode), Loc));
5005   }
5006 
5007   // Add the processor imod operand, if necessary.
5008   if (ProcessorIMod) {
5009     Operands.push_back(ARMOperand::CreateImm(
5010           MCConstantExpr::Create(ProcessorIMod, getContext()),
5011                                  NameLoc, NameLoc));
5012   }
5013 
5014   // Add the remaining tokens in the mnemonic.
5015   while (Next != StringRef::npos) {
5016     Start = Next;
5017     Next = Name.find('.', Start + 1);
5018     StringRef ExtraToken = Name.slice(Start, Next);
5019 
5020     // Some NEON instructions have an optional datatype suffix that is
5021     // completely ignored. Check for that.
5022     if (isDataTypeToken(ExtraToken) &&
5023         doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5024       continue;
5025 
5026     if (ExtraToken != ".n") {
5027       SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5028       Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5029     }
5030   }
5031 
5032   // Read the remaining operands.
5033   if (getLexer().isNot(AsmToken::EndOfStatement)) {
5034     // Read the first operand.
5035     if (parseOperand(Operands, Mnemonic)) {
5036       Parser.EatToEndOfStatement();
5037       return true;
5038     }
5039 
5040     while (getLexer().is(AsmToken::Comma)) {
5041       Parser.Lex();  // Eat the comma.
5042 
5043       // Parse and remember the operand.
5044       if (parseOperand(Operands, Mnemonic)) {
5045         Parser.EatToEndOfStatement();
5046         return true;
5047       }
5048     }
5049   }
5050 
5051   if (getLexer().isNot(AsmToken::EndOfStatement)) {
5052     SMLoc Loc = getLexer().getLoc();
5053     Parser.EatToEndOfStatement();
5054     return Error(Loc, "unexpected token in argument list");
5055   }
5056 
5057   Parser.Lex(); // Consume the EndOfStatement
5058 
5059   // Some instructions, mostly Thumb, have forms for the same mnemonic that
5060   // do and don't have a cc_out optional-def operand. With some spot-checks
5061   // of the operand list, we can figure out which variant we're trying to
5062   // parse and adjust accordingly before actually matching. We shouldn't ever
5063   // try to remove a cc_out operand that was explicitly set on the
5064   // mnemonic, of course (CarrySetting == true). Reason #317 why the
5065   // table-driven matcher doesn't fit well with the ARM instruction set.
5066   if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5067     ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5068     Operands.erase(Operands.begin() + 1);
5069     delete Op;
5070   }
5071 
5072   // ARM mode 'blx' needs special handling, as the register operand version
5073   // is predicable, but the label operand version is not. So, we can't rely
5074   // on the Mnemonic based checking to correctly figure out when to put
5075   // a k_CondCode operand in the list. If we're trying to match the label
5076   // version, remove the k_CondCode operand here.
5077   if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5078       static_cast<ARMOperand*>(Operands[2])->isImm()) {
5079     ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5080     Operands.erase(Operands.begin() + 1);
5081     delete Op;
5082   }
5083 
5084   // The vector-compare-to-zero instructions have a literal token "#0" at
5085   // the end that reaches here as an immediate operand. Convert it to a
5086   // token to play nicely with the matcher.
5087   if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5088       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5089       static_cast<ARMOperand*>(Operands[5])->isImm()) {
5090     ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5091     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5092     if (CE && CE->getValue() == 0) {
5093       Operands.erase(Operands.begin() + 5);
5094       Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5095       delete Op;
5096     }
5097   }
5098   // VCMP{E} does the same thing, but with a different operand count.
5099   if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5100       static_cast<ARMOperand*>(Operands[4])->isImm()) {
5101     ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5102     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5103     if (CE && CE->getValue() == 0) {
5104       Operands.erase(Operands.begin() + 4);
5105       Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5106       delete Op;
5107     }
5108   }
5109   // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5110   // end. Convert it to a token here. Take care not to convert those
5111   // that should hit the Thumb2 encoding.
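  // For example (illustrative), for 'rsbs r0, r1, #0' in Thumb1 the trailing
  // '#0' immediate operand is rewritten as a literal "#0" token here.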
5112   if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5113       static_cast<ARMOperand*>(Operands[3])->isReg() &&
5114       static_cast<ARMOperand*>(Operands[4])->isReg() &&
5115       static_cast<ARMOperand*>(Operands[5])->isImm()) {
5116     ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5117     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5118     if (CE && CE->getValue() == 0 &&
5119         (isThumbOne() ||
5120          // The cc_out operand matches the IT block.
5121          ((inITBlock() != CarrySetting) &&
5122          // Neither register operand is a high register.
5123          (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5124           isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5125       Operands.erase(Operands.begin() + 5);
5126       Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5127       delete Op;
5128     }
5129   }
5130 
5131   return false;
5132 }
5133 
5134 // Validate context-sensitive operand constraints.
5135 
5136 // Return 'true' if the register list contains non-low GPR registers,
5137 // 'false' otherwise. If Reg is in the register list, set 'containsReg'
5138 // to true.
5139 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5140                                  unsigned HiReg, bool &containsReg) {
5141   containsReg = false;
5142   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5143     unsigned OpReg = Inst.getOperand(i).getReg();
5144     if (OpReg == Reg)
5145       containsReg = true;
5146     // Anything other than a low register isn't legal here.
5147     if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5148       return true;
5149   }
5150   return false;
5151 }
5152 
5153 // Check if the specified register is in the register list of the inst,
5154 // starting at the indicated operand number.
5155 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5156   for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5157     unsigned OpReg = Inst.getOperand(i).getReg();
5158     if (OpReg == Reg)
5159       return true;
5160   }
5161   return false;
5162 }
5163 
5164 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5165 // the ARMInsts array) instead. Getting that here requires awkward
5166 // API changes, though. Better way?
5167 namespace llvm {
5168 extern const MCInstrDesc ARMInsts[];
5169 }
5170 static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5171   return ARMInsts[Opcode];
5172 }
5173 
5174 // FIXME: We would really like to be able to tablegen'erate this.
5175 bool ARMAsmParser::
5176 validateInstruction(MCInst &Inst,
5177                     const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5178   const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5179   SMLoc Loc = Operands[0]->getStartLoc();
5180   // Check the IT block state first.
5181   // NOTE: BKPT instruction has the interesting property of being
5182   // allowed in IT blocks, but not being predicable.  It just always
5183   // executes.
5184   if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5185       Inst.getOpcode() != ARM::BKPT) {
5186     unsigned bit = 1;
5187     if (ITState.FirstCond)
5188       ITState.FirstCond = false;
5189     else
5190       bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5191     // The instruction must be predicable.
5192     if (!MCID.isPredicable())
5193       return Error(Loc, "instructions in IT block must be predicable");
5194     unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5195     unsigned ITCond = bit ? ITState.Cond :
5196       ARMCC::getOppositeCondition(ITState.Cond);
5197     if (Cond != ITCond) {
5198       // Find the condition code Operand to get its SMLoc information.
5199       SMLoc CondLoc;
5200       for (unsigned i = 1; i < Operands.size(); ++i)
5201         if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5202           CondLoc = Operands[i]->getStartLoc();
5203       return Error(CondLoc, "incorrect condition in IT block; got '" +
5204                    StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5205                    "', but expected '" +
5206                    ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5207     }
5208   // Check for non-'al' condition codes outside of the IT block.
5209   } else if (isThumbTwo() && MCID.isPredicable() &&
5210              Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5211              ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5212              Inst.getOpcode() != ARM::t2B)
5213     return Error(Loc, "predicated instructions must be in IT block");
5214 
5215   switch (Inst.getOpcode()) {
5216   case ARM::LDRD:
5217   case ARM::LDRD_PRE:
5218   case ARM::LDRD_POST:
5219   case ARM::LDREXD: {
5220     // Rt2 must be Rt + 1.
5221     unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5222     unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5223     if (Rt2 != Rt + 1)
5224       return Error(Operands[3]->getStartLoc(),
5225                    "destination operands must be sequential");
5226     return false;
5227   }
5228   case ARM::STRD: {
5229     // Rt2 must be Rt + 1.
5230     unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5231     unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5232     if (Rt2 != Rt + 1)
5233       return Error(Operands[3]->getStartLoc(),
5234                    "source operands must be sequential");
5235     return false;
5236   }
5237   case ARM::STRD_PRE:
5238   case ARM::STRD_POST:
5239   case ARM::STREXD: {
5240     // Rt2 must be Rt + 1.
5241     unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5242     unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5243     if (Rt2 != Rt + 1)
5244       return Error(Operands[3]->getStartLoc(),
5245                    "source operands must be sequential");
5246     return false;
5247   }
5248   case ARM::SBFX:
5249   case ARM::UBFX: {
5250     // width must be in range [1, 32-lsb]
5251     unsigned lsb = Inst.getOperand(2).getImm();
5252     unsigned widthm1 = Inst.getOperand(3).getImm();
5253     if (widthm1 >= 32 - lsb)
5254       return Error(Operands[5]->getStartLoc(),
5255                    "bitfield width must be in range [1,32-lsb]");
5256     return false;
5257   }
5258   case ARM::tLDMIA: {
5259     // If we're parsing Thumb2, the .w variant is available and handles
5260     // most cases that are normally illegal for a Thumb1 LDM
5261     // instruction. We'll make the transformation in processInstruction()
5262     // if necessary.
5263     //
5264     // Thumb LDM instructions are writeback iff the base register is not
5265     // in the register list.
5266     unsigned Rn = Inst.getOperand(0).getReg();
5267     bool hasWritebackToken =
5268       (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5269        static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5270     bool listContainsBase;
5271     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5272       return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5273                    "registers must be in range r0-r7");
5274     // If we should have writeback, then there should be a '!' token.
5275     if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5276       return Error(Operands[2]->getStartLoc(),
5277                    "writeback operator '!' expected");
5278     // If we should not have writeback, there must not be a '!'. This is
5279     // true even for the 32-bit wide encodings.
5280     if (listContainsBase && hasWritebackToken)
5281       return Error(Operands[3]->getStartLoc(),
5282                    "writeback operator '!' not allowed when base register "
5283                    "in register list");
5284 
5285     break;
5286   }
5287   case ARM::t2LDMIA_UPD: {
5288     if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5289       return Error(Operands[4]->getStartLoc(),
5290                    "writeback operator '!' not allowed when base register "
5291                    "in register list");
5292     break;
5293   }
5294   // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5295   // so only issue a diagnostic for thumb1. The instructions will be
5296   // switched to the t2 encodings in processInstruction() if necessary.
5297   case ARM::tPOP: {
5298     bool listContainsBase;
5299     if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5300         !isThumbTwo())
5301       return Error(Operands[2]->getStartLoc(),
5302                    "registers must be in range r0-r7 or pc");
5303     break;
5304   }
5305   case ARM::tPUSH: {
5306     bool listContainsBase;
5307     if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5308         !isThumbTwo())
5309       return Error(Operands[2]->getStartLoc(),
5310                    "registers must be in range r0-r7 or lr");
5311     break;
5312   }
5313   case ARM::tSTMIA_UPD: {
5314     bool listContainsBase;
5315     if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5316       return Error(Operands[4]->getStartLoc(),
5317                    "registers must be in range r0-r7");
5318     break;
5319   }
5320   case ARM::tADDrSP: {
5321     // If the non-SP source operand and the destination operand are not the
5322     // same, we need thumb2 (for the wide encoding), or we have an error.
5323     if (!isThumbTwo() &&
5324         Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5325       return Error(Operands[4]->getStartLoc(),
5326                    "source register must be the same as destination");
5327     }
5328     break;
5329   }
5330   }
5331 
5332   return false;
5333 }
5334 
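// A brief note on the tables below: these helpers map the "..._Asm_..."
// pseudo-opcodes used for matching onto real instruction opcodes, and
// 'Spacing' reports the D-register stride of the list operand (1 for the
// d-register forms, 2 for the every-other-register q forms).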
5335 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5336   switch(Opc) {
5337   default: llvm_unreachable("unexpected opcode!");
5338   // VST1LN
5339   case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5340   case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5341   case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5342   case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5343   case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5344   case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5345   case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5346   case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5347   case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5348 
5349   // VST2LN
5350   case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5351   case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5352   case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5353   case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5354   case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5355 
5356   case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5357   case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5358   case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5359   case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5360   case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5361 
5362   case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5363   case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5364   case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5365   case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5366   case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5367 
5368   // VST3LN
5369   case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5370   case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5371   case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5372   case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5373   case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5374   case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5375   case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5376   case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5377   case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5378   case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5379   case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5380   case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5381   case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5382   case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5383   case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5384 
5385   // VST3
5386   case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5387   case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5388   case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5389   case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5390   case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5391   case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5392   case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5393   case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5394   case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5395   case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5396   case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5397   case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5398   case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5399   case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5400   case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5401   case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5402   case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5403   case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5404 
5405   // VST4LN
5406   case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5407   case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5408   case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5409   case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5410   case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5411   case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5412   case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5413   case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5414   case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5415   case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5416   case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5417   case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5418   case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5419   case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5420   case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5421 
5422   // VST4
5423   case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5424   case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5425   case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5426   case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5427   case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5428   case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5429   case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5430   case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5431   case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5432   case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5433   case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5434   case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5435   case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5436   case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5437   case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5438   case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5439   case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5440   case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5441   }
5442 }
5443 
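// Map a pseudo "..._Asm_..." VLD alias opcode to the real instruction opcode,
// and report the D-register stride of its register list through Spacing
// (1 for consecutive lists such as {d0,d1,d2}, 2 for the even/odd "q" forms
// such as {d0,d2,d4}). A minimal usage sketch, mirroring the callers in
// processInstruction() below:
//   unsigned Spacing;
//   TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));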
5444 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5445   switch(Opc) {
5446   default: llvm_unreachable("unexpected opcode!");
5447   // VLD1LN
5448   case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5449   case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5450   case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5451   case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5452   case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5453   case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5454   case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5455   case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5456   case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5457 
5458   // VLD2LN
5459   case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5460   case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5461   case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5462   case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5463   case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5464   case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5465   case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5466   case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5467   case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5468   case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5469   case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5470   case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5471   case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5472   case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5473   case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5474 
5475   // VLD3DUP
5476   case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5477   case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5478   case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5479   case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5480   case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5481   case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5482   case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5483   case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5484   case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5485   case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5486   case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5487   case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5488   case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5489   case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5490   case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5491   case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5492   case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5493   case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5494 
5495   // VLD3LN
5496   case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5497   case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5498   case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5499   case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5500   case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5501   case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5502   case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5503   case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5504   case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5505   case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5506   case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5507   case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5508   case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5509   case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5510   case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5511 
5512   // VLD3
5513   case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5514   case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5515   case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5516   case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5517   case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5518   case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5519   case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5520   case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5521   case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5522   case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5523   case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5524   case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5525   case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5526   case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5527   case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5528   case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5529   case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5530   case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5531 
5532   // VLD4LN
5533   case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5534   case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5535   case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5536   case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5537   case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5538   case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5539   case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5540   case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5541   case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5542   case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5543   case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5544   case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5545   case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5546   case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5547   case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5548 
5549   // VLD4DUP
5550   case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5551   case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5552   case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5553   case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5554   case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5555   case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5556   case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5557   case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5558   case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5559   case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5560   case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5561   case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5562   case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5563   case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5564   case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5565   case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5566   case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5567   case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5568 
5569   // VLD4
5570   case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5571   case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5572   case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5573   case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5574   case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5575   case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5576   case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5577   case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5578   case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5579   case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5580   case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5581   case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5582   case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5583   case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5584   case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5585   case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5586   case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5587   case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5588   }
5589 }
5590 
5591 bool ARMAsmParser::
5592 processInstruction(MCInst &Inst,
5593                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5594   switch (Inst.getOpcode()) {
5595   // Aliases for alternate PC+imm syntax of LDR instructions.
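  // For example (assumed syntax), "ldr.w r0, [pc, #-4]" matches the
  // t2LDRpcrel alias; it is simply remapped here to the canonical t2LDRpci
  // opcode, with the parsed operands left untouched.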
5596   case ARM::t2LDRpcrel:
5597     Inst.setOpcode(ARM::t2LDRpci);
5598     return true;
5599   case ARM::t2LDRBpcrel:
5600     Inst.setOpcode(ARM::t2LDRBpci);
5601     return true;
5602   case ARM::t2LDRHpcrel:
5603     Inst.setOpcode(ARM::t2LDRHpci);
5604     return true;
5605   case ARM::t2LDRSBpcrel:
5606     Inst.setOpcode(ARM::t2LDRSBpci);
5607     return true;
5608   case ARM::t2LDRSHpcrel:
5609     Inst.setOpcode(ARM::t2LDRSHpci);
5610     return true;
5611   // Handle NEON VST complex aliases.
5612   case ARM::VST1LNdWB_register_Asm_8:
5613   case ARM::VST1LNdWB_register_Asm_16:
5614   case ARM::VST1LNdWB_register_Asm_32: {
5615     MCInst TmpInst;
5616     // Shuffle the operands around so the lane index operand is in the
5617     // right place.
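    // A sketch of the shuffle, assuming a source line such as
    //   vst1.32 {d4[1]}, [r0:32], r2
    // The alias parses its operands as (Vd, lane, Rn, align, Rm, <pred ops>);
    // the real write-back instruction expects (Rn_wb, Rn, align, Rm, Vd, lane,
    // <pred ops>), which is the order rebuilt below.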
5618     unsigned Spacing;
5619     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5620     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5621     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5622     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5623     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5624     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5625     TmpInst.addOperand(Inst.getOperand(1)); // lane
5626     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5627     TmpInst.addOperand(Inst.getOperand(6));
5628     Inst = TmpInst;
5629     return true;
5630   }
5631 
5632   case ARM::VST2LNdWB_register_Asm_8:
5633   case ARM::VST2LNdWB_register_Asm_16:
5634   case ARM::VST2LNdWB_register_Asm_32:
5635   case ARM::VST2LNqWB_register_Asm_16:
5636   case ARM::VST2LNqWB_register_Asm_32: {
5637     MCInst TmpInst;
5638     // Shuffle the operands around so the lane index operand is in the
5639     // right place.
5640     unsigned Spacing;
5641     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5642     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5643     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5644     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5645     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5646     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5647     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5648                                             Spacing));
5649     TmpInst.addOperand(Inst.getOperand(1)); // lane
5650     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5651     TmpInst.addOperand(Inst.getOperand(6));
5652     Inst = TmpInst;
5653     return true;
5654   }
5655 
5656   case ARM::VST3LNdWB_register_Asm_8:
5657   case ARM::VST3LNdWB_register_Asm_16:
5658   case ARM::VST3LNdWB_register_Asm_32:
5659   case ARM::VST3LNqWB_register_Asm_16:
5660   case ARM::VST3LNqWB_register_Asm_32: {
5661     MCInst TmpInst;
5662     // Shuffle the operands around so the lane index operand is in the
5663     // right place.
5664     unsigned Spacing;
5665     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5666     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5667     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5668     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5669     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5670     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5671     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5672                                             Spacing));
5673     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5674                                             Spacing * 2));
5675     TmpInst.addOperand(Inst.getOperand(1)); // lane
5676     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5677     TmpInst.addOperand(Inst.getOperand(6));
5678     Inst = TmpInst;
5679     return true;
5680   }
5681 
5682   case ARM::VST4LNdWB_register_Asm_8:
5683   case ARM::VST4LNdWB_register_Asm_16:
5684   case ARM::VST4LNdWB_register_Asm_32:
5685   case ARM::VST4LNqWB_register_Asm_16:
5686   case ARM::VST4LNqWB_register_Asm_32: {
5687     MCInst TmpInst;
5688     // Shuffle the operands around so the lane index operand is in the
5689     // right place.
5690     unsigned Spacing;
5691     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5692     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5693     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5694     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5695     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5696     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5697     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5698                                             Spacing));
5699     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5700                                             Spacing * 2));
5701     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5702                                             Spacing * 3));
5703     TmpInst.addOperand(Inst.getOperand(1)); // lane
5704     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5705     TmpInst.addOperand(Inst.getOperand(6));
5706     Inst = TmpInst;
5707     return true;
5708   }
5709 
5710   case ARM::VST1LNdWB_fixed_Asm_8:
5711   case ARM::VST1LNdWB_fixed_Asm_16:
5712   case ARM::VST1LNdWB_fixed_Asm_32: {
5713     MCInst TmpInst;
5714     // Shuffle the operands around so the lane index operand is in the
5715     // right place.
5716     unsigned Spacing;
5717     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5718     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5719     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5720     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5721     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5722     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5723     TmpInst.addOperand(Inst.getOperand(1)); // lane
5724     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5725     TmpInst.addOperand(Inst.getOperand(5));
5726     Inst = TmpInst;
5727     return true;
5728   }
5729 
5730   case ARM::VST2LNdWB_fixed_Asm_8:
5731   case ARM::VST2LNdWB_fixed_Asm_16:
5732   case ARM::VST2LNdWB_fixed_Asm_32:
5733   case ARM::VST2LNqWB_fixed_Asm_16:
5734   case ARM::VST2LNqWB_fixed_Asm_32: {
5735     MCInst TmpInst;
5736     // Shuffle the operands around so the lane index operand is in the
5737     // right place.
5738     unsigned Spacing;
5739     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5740     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5741     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5742     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5743     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5744     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5745     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5746                                             Spacing));
5747     TmpInst.addOperand(Inst.getOperand(1)); // lane
5748     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5749     TmpInst.addOperand(Inst.getOperand(5));
5750     Inst = TmpInst;
5751     return true;
5752   }
5753 
5754   case ARM::VST3LNdWB_fixed_Asm_8:
5755   case ARM::VST3LNdWB_fixed_Asm_16:
5756   case ARM::VST3LNdWB_fixed_Asm_32:
5757   case ARM::VST3LNqWB_fixed_Asm_16:
5758   case ARM::VST3LNqWB_fixed_Asm_32: {
5759     MCInst TmpInst;
5760     // Shuffle the operands around so the lane index operand is in the
5761     // right place.
5762     unsigned Spacing;
5763     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5764     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5765     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5766     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5767     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5768     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5769     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5770                                             Spacing));
5771     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5772                                             Spacing * 2));
5773     TmpInst.addOperand(Inst.getOperand(1)); // lane
5774     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5775     TmpInst.addOperand(Inst.getOperand(5));
5776     Inst = TmpInst;
5777     return true;
5778   }
5779 
5780   case ARM::VST4LNdWB_fixed_Asm_8:
5781   case ARM::VST4LNdWB_fixed_Asm_16:
5782   case ARM::VST4LNdWB_fixed_Asm_32:
5783   case ARM::VST4LNqWB_fixed_Asm_16:
5784   case ARM::VST4LNqWB_fixed_Asm_32: {
5785     MCInst TmpInst;
5786     // Shuffle the operands around so the lane index operand is in the
5787     // right place.
5788     unsigned Spacing;
5789     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5790     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5791     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5792     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5793     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5794     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5795     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5796                                             Spacing));
5797     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5798                                             Spacing * 2));
5799     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5800                                             Spacing * 3));
5801     TmpInst.addOperand(Inst.getOperand(1)); // lane
5802     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5803     TmpInst.addOperand(Inst.getOperand(5));
5804     Inst = TmpInst;
5805     return true;
5806   }
5807 
5808   case ARM::VST1LNdAsm_8:
5809   case ARM::VST1LNdAsm_16:
5810   case ARM::VST1LNdAsm_32: {
5811     MCInst TmpInst;
5812     // Shuffle the operands around so the lane index operand is in the
5813     // right place.
5814     unsigned Spacing;
5815     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5816     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5817     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5818     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5819     TmpInst.addOperand(Inst.getOperand(1)); // lane
5820     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5821     TmpInst.addOperand(Inst.getOperand(5));
5822     Inst = TmpInst;
5823     return true;
5824   }
5825 
5826   case ARM::VST2LNdAsm_8:
5827   case ARM::VST2LNdAsm_16:
5828   case ARM::VST2LNdAsm_32:
5829   case ARM::VST2LNqAsm_16:
5830   case ARM::VST2LNqAsm_32: {
5831     MCInst TmpInst;
5832     // Shuffle the operands around so the lane index operand is in the
5833     // right place.
5834     unsigned Spacing;
5835     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5836     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5837     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5838     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5839     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5840                                             Spacing));
5841     TmpInst.addOperand(Inst.getOperand(1)); // lane
5842     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5843     TmpInst.addOperand(Inst.getOperand(5));
5844     Inst = TmpInst;
5845     return true;
5846   }
5847 
5848   case ARM::VST3LNdAsm_8:
5849   case ARM::VST3LNdAsm_16:
5850   case ARM::VST3LNdAsm_32:
5851   case ARM::VST3LNqAsm_16:
5852   case ARM::VST3LNqAsm_32: {
5853     MCInst TmpInst;
5854     // Shuffle the operands around so the lane index operand is in the
5855     // right place.
5856     unsigned Spacing;
5857     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5858     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5859     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5860     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5861     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5862                                             Spacing));
5863     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5864                                             Spacing * 2));
5865     TmpInst.addOperand(Inst.getOperand(1)); // lane
5866     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5867     TmpInst.addOperand(Inst.getOperand(5));
5868     Inst = TmpInst;
5869     return true;
5870   }
5871 
5872   case ARM::VST4LNdAsm_8:
5873   case ARM::VST4LNdAsm_16:
5874   case ARM::VST4LNdAsm_32:
5875   case ARM::VST4LNqAsm_16:
5876   case ARM::VST4LNqAsm_32: {
5877     MCInst TmpInst;
5878     // Shuffle the operands around so the lane index operand is in the
5879     // right place.
5880     unsigned Spacing;
5881     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5882     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5883     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5884     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5885     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5886                                             Spacing));
5887     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5888                                             Spacing * 2));
5889     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5890                                             Spacing * 3));
5891     TmpInst.addOperand(Inst.getOperand(1)); // lane
5892     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5893     TmpInst.addOperand(Inst.getOperand(5));
5894     Inst = TmpInst;
5895     return true;
5896   }
5897 
5898   // Handle NEON VLD complex aliases.
5899   case ARM::VLD1LNdWB_register_Asm_8:
5900   case ARM::VLD1LNdWB_register_Asm_16:
5901   case ARM::VLD1LNdWB_register_Asm_32: {
5902     MCInst TmpInst;
5903     // Shuffle the operands around so the lane index operand is in the
5904     // right place.
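    // Unlike the VST cases, loads also repeat the destination registers as
    // tied source operands ("Tied operand src" below), since a lane load must
    // preserve the lanes it does not write.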
5905     unsigned Spacing;
5906     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5907     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5908     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5909     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5910     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5911     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5912     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5913     TmpInst.addOperand(Inst.getOperand(1)); // lane
5914     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5915     TmpInst.addOperand(Inst.getOperand(6));
5916     Inst = TmpInst;
5917     return true;
5918   }
5919 
5920   case ARM::VLD2LNdWB_register_Asm_8:
5921   case ARM::VLD2LNdWB_register_Asm_16:
5922   case ARM::VLD2LNdWB_register_Asm_32:
5923   case ARM::VLD2LNqWB_register_Asm_16:
5924   case ARM::VLD2LNqWB_register_Asm_32: {
5925     MCInst TmpInst;
5926     // Shuffle the operands around so the lane index operand is in the
5927     // right place.
5928     unsigned Spacing;
5929     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5930     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5931     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5932                                             Spacing));
5933     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5934     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5935     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5936     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5937     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5938     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5939                                             Spacing));
5940     TmpInst.addOperand(Inst.getOperand(1)); // lane
5941     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5942     TmpInst.addOperand(Inst.getOperand(6));
5943     Inst = TmpInst;
5944     return true;
5945   }
5946 
5947   case ARM::VLD3LNdWB_register_Asm_8:
5948   case ARM::VLD3LNdWB_register_Asm_16:
5949   case ARM::VLD3LNdWB_register_Asm_32:
5950   case ARM::VLD3LNqWB_register_Asm_16:
5951   case ARM::VLD3LNqWB_register_Asm_32: {
5952     MCInst TmpInst;
5953     // Shuffle the operands around so the lane index operand is in the
5954     // right place.
5955     unsigned Spacing;
5956     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5957     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5958     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5959                                             Spacing));
5960     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5961                                             Spacing * 2));
5962     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5963     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5964     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5965     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5966     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5967     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5968                                             Spacing));
5969     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5970                                             Spacing * 2));
5971     TmpInst.addOperand(Inst.getOperand(1)); // lane
5972     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5973     TmpInst.addOperand(Inst.getOperand(6));
5974     Inst = TmpInst;
5975     return true;
5976   }
5977 
5978   case ARM::VLD4LNdWB_register_Asm_8:
5979   case ARM::VLD4LNdWB_register_Asm_16:
5980   case ARM::VLD4LNdWB_register_Asm_32:
5981   case ARM::VLD4LNqWB_register_Asm_16:
5982   case ARM::VLD4LNqWB_register_Asm_32: {
5983     MCInst TmpInst;
5984     // Shuffle the operands around so the lane index operand is in the
5985     // right place.
5986     unsigned Spacing;
5987     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5988     TmpInst.addOperand(Inst.getOperand(0)); // Vd
5989     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5990                                             Spacing));
5991     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5992                                             Spacing * 2));
5993     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5994                                             Spacing * 3));
5995     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5996     TmpInst.addOperand(Inst.getOperand(2)); // Rn
5997     TmpInst.addOperand(Inst.getOperand(3)); // alignment
5998     TmpInst.addOperand(Inst.getOperand(4)); // Rm
5999     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6000     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6001                                             Spacing));
6002     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6003                                             Spacing * 2));
6004     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6005                                             Spacing * 3));
6006     TmpInst.addOperand(Inst.getOperand(1)); // lane
6007     TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6008     TmpInst.addOperand(Inst.getOperand(6));
6009     Inst = TmpInst;
6010     return true;
6011   }
6012 
6013   case ARM::VLD1LNdWB_fixed_Asm_8:
6014   case ARM::VLD1LNdWB_fixed_Asm_16:
6015   case ARM::VLD1LNdWB_fixed_Asm_32: {
6016     MCInst TmpInst;
6017     // Shuffle the operands around so the lane index operand is in the
6018     // right place.
6019     unsigned Spacing;
6020     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6021     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6022     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6023     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6024     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6025     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6026     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6027     TmpInst.addOperand(Inst.getOperand(1)); // lane
6028     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6029     TmpInst.addOperand(Inst.getOperand(5));
6030     Inst = TmpInst;
6031     return true;
6032   }
6033 
6034   case ARM::VLD2LNdWB_fixed_Asm_8:
6035   case ARM::VLD2LNdWB_fixed_Asm_16:
6036   case ARM::VLD2LNdWB_fixed_Asm_32:
6037   case ARM::VLD2LNqWB_fixed_Asm_16:
6038   case ARM::VLD2LNqWB_fixed_Asm_32: {
6039     MCInst TmpInst;
6040     // Shuffle the operands around so the lane index operand is in the
6041     // right place.
6042     unsigned Spacing;
6043     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6044     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6045     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6046                                             Spacing));
6047     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6048     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6049     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6050     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6051     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6052     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6053                                             Spacing));
6054     TmpInst.addOperand(Inst.getOperand(1)); // lane
6055     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6056     TmpInst.addOperand(Inst.getOperand(5));
6057     Inst = TmpInst;
6058     return true;
6059   }
6060 
6061   case ARM::VLD3LNdWB_fixed_Asm_8:
6062   case ARM::VLD3LNdWB_fixed_Asm_16:
6063   case ARM::VLD3LNdWB_fixed_Asm_32:
6064   case ARM::VLD3LNqWB_fixed_Asm_16:
6065   case ARM::VLD3LNqWB_fixed_Asm_32: {
6066     MCInst TmpInst;
6067     // Shuffle the operands around so the lane index operand is in the
6068     // right place.
6069     unsigned Spacing;
6070     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6071     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6072     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6073                                             Spacing));
6074     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6075                                             Spacing * 2));
6076     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6077     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6078     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6079     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6080     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6081     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6082                                             Spacing));
6083     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6084                                             Spacing * 2));
6085     TmpInst.addOperand(Inst.getOperand(1)); // lane
6086     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6087     TmpInst.addOperand(Inst.getOperand(5));
6088     Inst = TmpInst;
6089     return true;
6090   }
6091 
6092   case ARM::VLD4LNdWB_fixed_Asm_8:
6093   case ARM::VLD4LNdWB_fixed_Asm_16:
6094   case ARM::VLD4LNdWB_fixed_Asm_32:
6095   case ARM::VLD4LNqWB_fixed_Asm_16:
6096   case ARM::VLD4LNqWB_fixed_Asm_32: {
6097     MCInst TmpInst;
6098     // Shuffle the operands around so the lane index operand is in the
6099     // right place.
6100     unsigned Spacing;
6101     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6102     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6103     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6104                                             Spacing));
6105     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6106                                             Spacing * 2));
6107     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6108                                             Spacing * 3));
6109     TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6110     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6111     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6112     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6113     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6114     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6115                                             Spacing));
6116     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6117                                             Spacing * 2));
6118     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6119                                             Spacing * 3));
6120     TmpInst.addOperand(Inst.getOperand(1)); // lane
6121     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6122     TmpInst.addOperand(Inst.getOperand(5));
6123     Inst = TmpInst;
6124     return true;
6125   }
6126 
6127   case ARM::VLD1LNdAsm_8:
6128   case ARM::VLD1LNdAsm_16:
6129   case ARM::VLD1LNdAsm_32: {
6130     MCInst TmpInst;
6131     // Shuffle the operands around so the lane index operand is in the
6132     // right place.
6133     unsigned Spacing;
6134     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6135     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6136     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6137     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6138     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6139     TmpInst.addOperand(Inst.getOperand(1)); // lane
6140     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6141     TmpInst.addOperand(Inst.getOperand(5));
6142     Inst = TmpInst;
6143     return true;
6144   }
6145 
6146   case ARM::VLD2LNdAsm_8:
6147   case ARM::VLD2LNdAsm_16:
6148   case ARM::VLD2LNdAsm_32:
6149   case ARM::VLD2LNqAsm_16:
6150   case ARM::VLD2LNqAsm_32: {
6151     MCInst TmpInst;
6152     // Shuffle the operands around so the lane index operand is in the
6153     // right place.
6154     unsigned Spacing;
6155     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6156     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6157     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6158                                             Spacing));
6159     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6160     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6161     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6162     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6163                                             Spacing));
6164     TmpInst.addOperand(Inst.getOperand(1)); // lane
6165     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6166     TmpInst.addOperand(Inst.getOperand(5));
6167     Inst = TmpInst;
6168     return true;
6169   }
6170 
6171   case ARM::VLD3LNdAsm_8:
6172   case ARM::VLD3LNdAsm_16:
6173   case ARM::VLD3LNdAsm_32:
6174   case ARM::VLD3LNqAsm_16:
6175   case ARM::VLD3LNqAsm_32: {
6176     MCInst TmpInst;
6177     // Shuffle the operands around so the lane index operand is in the
6178     // right place.
6179     unsigned Spacing;
6180     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6181     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6182     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6183                                             Spacing));
6184     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6185                                             Spacing * 2));
6186     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6187     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6188     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6189     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6190                                             Spacing));
6191     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6192                                             Spacing * 2));
6193     TmpInst.addOperand(Inst.getOperand(1)); // lane
6194     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6195     TmpInst.addOperand(Inst.getOperand(5));
6196     Inst = TmpInst;
6197     return true;
6198   }
6199 
6200   case ARM::VLD4LNdAsm_8:
6201   case ARM::VLD4LNdAsm_16:
6202   case ARM::VLD4LNdAsm_32:
6203   case ARM::VLD4LNqAsm_16:
6204   case ARM::VLD4LNqAsm_32: {
6205     MCInst TmpInst;
6206     // Shuffle the operands around so the lane index operand is in the
6207     // right place.
6208     unsigned Spacing;
6209     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6210     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6211     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6212                                             Spacing));
6213     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6214                                             Spacing * 2));
6215     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6216                                             Spacing * 3));
6217     TmpInst.addOperand(Inst.getOperand(2)); // Rn
6218     TmpInst.addOperand(Inst.getOperand(3)); // alignment
6219     TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6220     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6221                                             Spacing));
6222     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6223                                             Spacing * 2));
6224     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6225                                             Spacing * 3));
6226     TmpInst.addOperand(Inst.getOperand(1)); // lane
6227     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6228     TmpInst.addOperand(Inst.getOperand(5));
6229     Inst = TmpInst;
6230     return true;
6231   }
6232 
6233   // VLD3DUP single 3-element structure to all lanes instructions.
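  // For example (assumed syntax), "vld3.8 {d0[], d1[], d2[]}, [r1]" is the
  // d form (Spacing 1) and "vld3.8 {d0[], d2[], d4[]}, [r1]" the q form
  // (Spacing 2); the second and third destination registers below are derived
  // from Vd plus that spacing.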
6234   case ARM::VLD3DUPdAsm_8:
6235   case ARM::VLD3DUPdAsm_16:
6236   case ARM::VLD3DUPdAsm_32:
6237   case ARM::VLD3DUPqAsm_8:
6238   case ARM::VLD3DUPqAsm_16:
6239   case ARM::VLD3DUPqAsm_32: {
6240     MCInst TmpInst;
6241     unsigned Spacing;
6242     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6243     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6244     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6245                                             Spacing));
6246     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6247                                             Spacing * 2));
6248     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6249     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6250     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6251     TmpInst.addOperand(Inst.getOperand(4));
6252     Inst = TmpInst;
6253     return true;
6254   }
6255 
6256   case ARM::VLD3DUPdWB_fixed_Asm_8:
6257   case ARM::VLD3DUPdWB_fixed_Asm_16:
6258   case ARM::VLD3DUPdWB_fixed_Asm_32:
6259   case ARM::VLD3DUPqWB_fixed_Asm_8:
6260   case ARM::VLD3DUPqWB_fixed_Asm_16:
6261   case ARM::VLD3DUPqWB_fixed_Asm_32: {
6262     MCInst TmpInst;
6263     unsigned Spacing;
6264     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6265     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6266     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6267                                             Spacing));
6268     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6269                                             Spacing * 2));
6270     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6271     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6272     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6273     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6274     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6275     TmpInst.addOperand(Inst.getOperand(4));
6276     Inst = TmpInst;
6277     return true;
6278   }
6279 
6280   case ARM::VLD3DUPdWB_register_Asm_8:
6281   case ARM::VLD3DUPdWB_register_Asm_16:
6282   case ARM::VLD3DUPdWB_register_Asm_32:
6283   case ARM::VLD3DUPqWB_register_Asm_8:
6284   case ARM::VLD3DUPqWB_register_Asm_16:
6285   case ARM::VLD3DUPqWB_register_Asm_32: {
6286     MCInst TmpInst;
6287     unsigned Spacing;
6288     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6289     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6290     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6291                                             Spacing));
6292     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6293                                             Spacing * 2));
6294     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6295     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6296     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6297     TmpInst.addOperand(Inst.getOperand(3)); // Rm
6298     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6299     TmpInst.addOperand(Inst.getOperand(5));
6300     Inst = TmpInst;
6301     return true;
6302   }
6303 
6304   // VLD3 multiple 3-element structure instructions.
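  // The alias carries only the first register of the list; the full list
  // (Vd, Vd+Spacing, Vd+Spacing*2) is reconstructed here ahead of the Rn and
  // alignment operands.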
6305   case ARM::VLD3dAsm_8:
6306   case ARM::VLD3dAsm_16:
6307   case ARM::VLD3dAsm_32:
6308   case ARM::VLD3qAsm_8:
6309   case ARM::VLD3qAsm_16:
6310   case ARM::VLD3qAsm_32: {
6311     MCInst TmpInst;
6312     unsigned Spacing;
6313     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6314     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6315     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6316                                             Spacing));
6317     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6318                                             Spacing * 2));
6319     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6320     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6321     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6322     TmpInst.addOperand(Inst.getOperand(4));
6323     Inst = TmpInst;
6324     return true;
6325   }
6326 
6327   case ARM::VLD3dWB_fixed_Asm_8:
6328   case ARM::VLD3dWB_fixed_Asm_16:
6329   case ARM::VLD3dWB_fixed_Asm_32:
6330   case ARM::VLD3qWB_fixed_Asm_8:
6331   case ARM::VLD3qWB_fixed_Asm_16:
6332   case ARM::VLD3qWB_fixed_Asm_32: {
6333     MCInst TmpInst;
6334     unsigned Spacing;
6335     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6336     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6337     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6338                                             Spacing));
6339     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6340                                             Spacing * 2));
6341     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6342     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6343     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6344     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6345     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6346     TmpInst.addOperand(Inst.getOperand(4));
6347     Inst = TmpInst;
6348     return true;
6349   }
6350 
6351   case ARM::VLD3dWB_register_Asm_8:
6352   case ARM::VLD3dWB_register_Asm_16:
6353   case ARM::VLD3dWB_register_Asm_32:
6354   case ARM::VLD3qWB_register_Asm_8:
6355   case ARM::VLD3qWB_register_Asm_16:
6356   case ARM::VLD3qWB_register_Asm_32: {
6357     MCInst TmpInst;
6358     unsigned Spacing;
6359     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6360     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6361     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6362                                             Spacing));
6363     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6364                                             Spacing * 2));
6365     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6366     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6367     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6368     TmpInst.addOperand(Inst.getOperand(3)); // Rm
6369     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6370     TmpInst.addOperand(Inst.getOperand(5));
6371     Inst = TmpInst;
6372     return true;
6373   }
6374 
6375   // VLD4DUP single 4-element structure to all lanes instructions.
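  // Same pattern as VLD3DUP above, with a fourth register at Vd + Spacing*3.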
6376   case ARM::VLD4DUPdAsm_8:
6377   case ARM::VLD4DUPdAsm_16:
6378   case ARM::VLD4DUPdAsm_32:
6379   case ARM::VLD4DUPqAsm_8:
6380   case ARM::VLD4DUPqAsm_16:
6381   case ARM::VLD4DUPqAsm_32: {
6382     MCInst TmpInst;
6383     unsigned Spacing;
6384     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6385     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6386     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6387                                             Spacing));
6388     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6389                                             Spacing * 2));
6390     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6391                                             Spacing * 3));
6392     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6393     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6394     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6395     TmpInst.addOperand(Inst.getOperand(4));
6396     Inst = TmpInst;
6397     return true;
6398   }
6399 
6400   case ARM::VLD4DUPdWB_fixed_Asm_8:
6401   case ARM::VLD4DUPdWB_fixed_Asm_16:
6402   case ARM::VLD4DUPdWB_fixed_Asm_32:
6403   case ARM::VLD4DUPqWB_fixed_Asm_8:
6404   case ARM::VLD4DUPqWB_fixed_Asm_16:
6405   case ARM::VLD4DUPqWB_fixed_Asm_32: {
6406     MCInst TmpInst;
6407     unsigned Spacing;
6408     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6409     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6410     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6411                                             Spacing));
6412     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6413                                             Spacing * 2));
6414     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6415                                             Spacing * 3));
6416     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6417     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6418     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6419     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6420     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6421     TmpInst.addOperand(Inst.getOperand(4));
6422     Inst = TmpInst;
6423     return true;
6424   }
6425 
6426   case ARM::VLD4DUPdWB_register_Asm_8:
6427   case ARM::VLD4DUPdWB_register_Asm_16:
6428   case ARM::VLD4DUPdWB_register_Asm_32:
6429   case ARM::VLD4DUPqWB_register_Asm_8:
6430   case ARM::VLD4DUPqWB_register_Asm_16:
6431   case ARM::VLD4DUPqWB_register_Asm_32: {
6432     MCInst TmpInst;
6433     unsigned Spacing;
6434     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6435     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6436     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6437                                             Spacing));
6438     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6439                                             Spacing * 2));
6440     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6441                                             Spacing * 3));
6442     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6443     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6444     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6445     TmpInst.addOperand(Inst.getOperand(3)); // Rm
6446     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6447     TmpInst.addOperand(Inst.getOperand(5));
6448     Inst = TmpInst;
6449     return true;
6450   }
6451 
6452   // VLD4 multiple 4-element structure instructions.
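  // As with VLD3, the four-register list (Vd .. Vd+Spacing*3) is rebuilt from
  // the single register the alias carries.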
6453   case ARM::VLD4dAsm_8:
6454   case ARM::VLD4dAsm_16:
6455   case ARM::VLD4dAsm_32:
6456   case ARM::VLD4qAsm_8:
6457   case ARM::VLD4qAsm_16:
6458   case ARM::VLD4qAsm_32: {
6459     MCInst TmpInst;
6460     unsigned Spacing;
6461     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6462     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6463     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6464                                             Spacing));
6465     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6466                                             Spacing * 2));
6467     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6468                                             Spacing * 3));
6469     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6470     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6471     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6472     TmpInst.addOperand(Inst.getOperand(4));
6473     Inst = TmpInst;
6474     return true;
6475   }
6476 
6477   case ARM::VLD4dWB_fixed_Asm_8:
6478   case ARM::VLD4dWB_fixed_Asm_16:
6479   case ARM::VLD4dWB_fixed_Asm_32:
6480   case ARM::VLD4qWB_fixed_Asm_8:
6481   case ARM::VLD4qWB_fixed_Asm_16:
6482   case ARM::VLD4qWB_fixed_Asm_32: {
6483     MCInst TmpInst;
6484     unsigned Spacing;
6485     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6486     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6487     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6488                                             Spacing));
6489     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6490                                             Spacing * 2));
6491     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6492                                             Spacing * 3));
6493     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6494     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6495     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6496     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6497     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6498     TmpInst.addOperand(Inst.getOperand(4));
6499     Inst = TmpInst;
6500     return true;
6501   }
6502 
6503   case ARM::VLD4dWB_register_Asm_8:
6504   case ARM::VLD4dWB_register_Asm_16:
6505   case ARM::VLD4dWB_register_Asm_32:
6506   case ARM::VLD4qWB_register_Asm_8:
6507   case ARM::VLD4qWB_register_Asm_16:
6508   case ARM::VLD4qWB_register_Asm_32: {
6509     MCInst TmpInst;
6510     unsigned Spacing;
6511     TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6512     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6513     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6514                                             Spacing));
6515     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6516                                             Spacing * 2));
6517     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6518                                             Spacing * 3));
6519     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6520     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6521     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6522     TmpInst.addOperand(Inst.getOperand(3)); // Rm
6523     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6524     TmpInst.addOperand(Inst.getOperand(5));
6525     Inst = TmpInst;
6526     return true;
6527   }
6528 
6529   // VST3 multiple 3-element structure instructions.
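  // E.g., 'vst3.16 {d0, d1, d2}, [r2]' is rewritten here from the VST3*Asm
  // pseudo into the real VST3 instruction with the register list explicit.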
6530   case ARM::VST3dAsm_8:
6531   case ARM::VST3dAsm_16:
6532   case ARM::VST3dAsm_32:
6533   case ARM::VST3qAsm_8:
6534   case ARM::VST3qAsm_16:
6535   case ARM::VST3qAsm_32: {
6536     MCInst TmpInst;
6537     unsigned Spacing;
6538     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6539     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6540     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6541     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6542     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6543                                             Spacing));
6544     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6545                                             Spacing * 2));
6546     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6547     TmpInst.addOperand(Inst.getOperand(4));
6548     Inst = TmpInst;
6549     return true;
6550   }
6551 
6552   case ARM::VST3dWB_fixed_Asm_8:
6553   case ARM::VST3dWB_fixed_Asm_16:
6554   case ARM::VST3dWB_fixed_Asm_32:
6555   case ARM::VST3qWB_fixed_Asm_8:
6556   case ARM::VST3qWB_fixed_Asm_16:
6557   case ARM::VST3qWB_fixed_Asm_32: {
6558     MCInst TmpInst;
6559     unsigned Spacing;
6560     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6561     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6562     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6563     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6564     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6565     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6566     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6567                                             Spacing));
6568     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6569                                             Spacing * 2));
6570     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6571     TmpInst.addOperand(Inst.getOperand(4));
6572     Inst = TmpInst;
6573     return true;
6574   }
6575 
6576   case ARM::VST3dWB_register_Asm_8:
6577   case ARM::VST3dWB_register_Asm_16:
6578   case ARM::VST3dWB_register_Asm_32:
6579   case ARM::VST3qWB_register_Asm_8:
6580   case ARM::VST3qWB_register_Asm_16:
6581   case ARM::VST3qWB_register_Asm_32: {
6582     MCInst TmpInst;
6583     unsigned Spacing;
6584     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6585     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6586     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6587     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6588     TmpInst.addOperand(Inst.getOperand(3)); // Rm
6589     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6590     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6591                                             Spacing));
6592     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6593                                             Spacing * 2));
6594     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6595     TmpInst.addOperand(Inst.getOperand(5));
6596     Inst = TmpInst;
6597     return true;
6598   }
6599 
6600   // VST4 multiple 4-element structure instructions.
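  // E.g., 'vst4.32 {d0, d1, d2, d3}, [r3]' is rewritten here from the
  // VST4*Asm pseudo into the real VST4 instruction with the register list
  // explicit.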
6601   case ARM::VST4dAsm_8:
6602   case ARM::VST4dAsm_16:
6603   case ARM::VST4dAsm_32:
6604   case ARM::VST4qAsm_8:
6605   case ARM::VST4qAsm_16:
6606   case ARM::VST4qAsm_32: {
6607     MCInst TmpInst;
6608     unsigned Spacing;
6609     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6610     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6611     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6612     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6613     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6614                                             Spacing));
6615     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6616                                             Spacing * 2));
6617     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6618                                             Spacing * 3));
6619     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6620     TmpInst.addOperand(Inst.getOperand(4));
6621     Inst = TmpInst;
6622     return true;
6623   }
6624 
6625   case ARM::VST4dWB_fixed_Asm_8:
6626   case ARM::VST4dWB_fixed_Asm_16:
6627   case ARM::VST4dWB_fixed_Asm_32:
6628   case ARM::VST4qWB_fixed_Asm_8:
6629   case ARM::VST4qWB_fixed_Asm_16:
6630   case ARM::VST4qWB_fixed_Asm_32: {
6631     MCInst TmpInst;
6632     unsigned Spacing;
6633     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6634     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6635     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6636     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6637     TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6638     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6639     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6640                                             Spacing));
6641     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6642                                             Spacing * 2));
6643     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6644                                             Spacing * 3));
6645     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6646     TmpInst.addOperand(Inst.getOperand(4));
6647     Inst = TmpInst;
6648     return true;
6649   }
6650 
6651   case ARM::VST4dWB_register_Asm_8:
6652   case ARM::VST4dWB_register_Asm_16:
6653   case ARM::VST4dWB_register_Asm_32:
6654   case ARM::VST4qWB_register_Asm_8:
6655   case ARM::VST4qWB_register_Asm_16:
6656   case ARM::VST4qWB_register_Asm_32: {
6657     MCInst TmpInst;
6658     unsigned Spacing;
6659     TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6660     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6661     TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6662     TmpInst.addOperand(Inst.getOperand(2)); // alignment
6663     TmpInst.addOperand(Inst.getOperand(3)); // Rm
6664     TmpInst.addOperand(Inst.getOperand(0)); // Vd
6665     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6666                                             Spacing));
6667     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6668                                             Spacing * 2));
6669     TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6670                                             Spacing * 3));
6671     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6672     TmpInst.addOperand(Inst.getOperand(5));
6673     Inst = TmpInst;
6674     return true;
6675   }
6676 
6677   // Handle encoding choice for the shift-immediate instructions.
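  // E.g., 'lsls r0, r0, #2' outside an IT block can use the 16-bit tLSLri
  // encoding instead of the 32-bit t2LSLri, provided no '.w' was written.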
6678   case ARM::t2LSLri:
6679   case ARM::t2LSRri:
6680   case ARM::t2ASRri: {
6681     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6682         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6683         Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6684         !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6685          static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6686       unsigned NewOpc;
6687       switch (Inst.getOpcode()) {
6688       default: llvm_unreachable("unexpected opcode");
6689       case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6690       case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6691       case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6692       }
6693       // The Thumb1 operands aren't in the same order. Awesome, eh?
6694       MCInst TmpInst;
6695       TmpInst.setOpcode(NewOpc);
6696       TmpInst.addOperand(Inst.getOperand(0));
6697       TmpInst.addOperand(Inst.getOperand(5));
6698       TmpInst.addOperand(Inst.getOperand(1));
6699       TmpInst.addOperand(Inst.getOperand(2));
6700       TmpInst.addOperand(Inst.getOperand(3));
6701       TmpInst.addOperand(Inst.getOperand(4));
6702       Inst = TmpInst;
6703       return true;
6704     }
6705     return false;
6706   }
6707 
6708   // Handle the Thumb2 mode MOV complex aliases.
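  // E.g., 'mov r2, r3, lsl r4' is really a register-shifted move and is
  // rewritten to the corresponding shift instruction (tLSLrr or t2LSLrr).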
6709   case ARM::t2MOVsr:
6710   case ARM::t2MOVSsr: {
6711     // Which instruction to expand to depends on the CCOut operand, on
6712     // whether the register operands are low registers, and on whether
6713     // we're in an IT block.
6714     bool isNarrow = false;
6715     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6716         isARMLowRegister(Inst.getOperand(1).getReg()) &&
6717         isARMLowRegister(Inst.getOperand(2).getReg()) &&
6718         Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6719         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6720       isNarrow = true;
6721     MCInst TmpInst;
6722     unsigned newOpc;
6723     switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6724     default: llvm_unreachable("unexpected opcode!");
6725     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6726     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6727     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6728     case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6729     }
6730     TmpInst.setOpcode(newOpc);
6731     TmpInst.addOperand(Inst.getOperand(0)); // Rd
6732     if (isNarrow)
6733       TmpInst.addOperand(MCOperand::CreateReg(
6734           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6735     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6736     TmpInst.addOperand(Inst.getOperand(2)); // Rm
6737     TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6738     TmpInst.addOperand(Inst.getOperand(5));
6739     if (!isNarrow)
6740       TmpInst.addOperand(MCOperand::CreateReg(
6741           Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6742     Inst = TmpInst;
6743     return true;
6744   }
6745   case ARM::t2MOVsi:
6746   case ARM::t2MOVSsi: {
6747     // Which instruction to expand to depends on the CCOut operand, on
6748     // whether the register operands are low registers, and on whether
6749     // we're in an IT block.
6750     bool isNarrow = false;
6751     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6752         isARMLowRegister(Inst.getOperand(1).getReg()) &&
6753         inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6754       isNarrow = true;
6755     MCInst TmpInst;
6756     unsigned newOpc;
6757     switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6758     default: llvm_unreachable("unexpected opcode!");
6759     case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6760     case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6761     case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6762     case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6763     case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6764     }
6765     unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6766     if (Amount == 32) Amount = 0;
6767     TmpInst.setOpcode(newOpc);
6768     TmpInst.addOperand(Inst.getOperand(0)); // Rd
6769     if (isNarrow)
6770       TmpInst.addOperand(MCOperand::CreateReg(
6771           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6772     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6773     if (newOpc != ARM::t2RRX)
6774       TmpInst.addOperand(MCOperand::CreateImm(Amount));
6775     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6776     TmpInst.addOperand(Inst.getOperand(4));
6777     if (!isNarrow)
6778       TmpInst.addOperand(MCOperand::CreateReg(
6779           Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6780     Inst = TmpInst;
6781     return true;
6782   }
6783   // Handle the ARM mode MOV complex aliases.
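  // E.g., 'asr r0, r1, r2' in ARM mode is an alias for 'mov r0, r1, asr r2'
  // and is rewritten to MOVsr (or MOVsi/MOVr for the immediate forms).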
6784   case ARM::ASRr:
6785   case ARM::LSRr:
6786   case ARM::LSLr:
6787   case ARM::RORr: {
6788     ARM_AM::ShiftOpc ShiftTy;
6789     switch(Inst.getOpcode()) {
6790     default: llvm_unreachable("unexpected opcode!");
6791     case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6792     case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6793     case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6794     case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6795     }
6796     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6797     MCInst TmpInst;
6798     TmpInst.setOpcode(ARM::MOVsr);
6799     TmpInst.addOperand(Inst.getOperand(0)); // Rd
6800     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6801     TmpInst.addOperand(Inst.getOperand(2)); // Rm
6802     TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6803     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6804     TmpInst.addOperand(Inst.getOperand(4));
6805     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6806     Inst = TmpInst;
6807     return true;
6808   }
6809   case ARM::ASRi:
6810   case ARM::LSRi:
6811   case ARM::LSLi:
6812   case ARM::RORi: {
6813     ARM_AM::ShiftOpc ShiftTy;
6814     switch(Inst.getOpcode()) {
6815     default: llvm_unreachable("unexpected opcode!");
6816     case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6817     case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6818     case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6819     case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6820     }
6821     // A shift by zero is a plain MOVr, not a MOVsi.
6822     unsigned Amt = Inst.getOperand(2).getImm();
6823     unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6824     // A shift by 32 should be encoded as 0 when permitted
6825     if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
6826       Amt = 0;
6827     unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6828     MCInst TmpInst;
6829     TmpInst.setOpcode(Opc);
6830     TmpInst.addOperand(Inst.getOperand(0)); // Rd
6831     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6832     if (Opc == ARM::MOVsi)
6833       TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6834     TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6835     TmpInst.addOperand(Inst.getOperand(4));
6836     TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6837     Inst = TmpInst;
6838     return true;
6839   }
6840   case ARM::RRXi: {
6841     unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6842     MCInst TmpInst;
6843     TmpInst.setOpcode(ARM::MOVsi);
6844     TmpInst.addOperand(Inst.getOperand(0)); // Rd
6845     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6846     TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6847     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6848     TmpInst.addOperand(Inst.getOperand(3));
6849     TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6850     Inst = TmpInst;
6851     return true;
6852   }
6853   case ARM::t2LDMIA_UPD: {
6854     // If this is a load of a single register, then we should use
6855     // a post-indexed LDR instruction instead, per the ARM ARM.
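    // E.g., a Thumb2 'ldmia r2!, {r3}' is emitted as 'ldr r3, [r2], #4'.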
6856     if (Inst.getNumOperands() != 5)
6857       return false;
6858     MCInst TmpInst;
6859     TmpInst.setOpcode(ARM::t2LDR_POST);
6860     TmpInst.addOperand(Inst.getOperand(4)); // Rt
6861     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6862     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6863     TmpInst.addOperand(MCOperand::CreateImm(4));
6864     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6865     TmpInst.addOperand(Inst.getOperand(3));
6866     Inst = TmpInst;
6867     return true;
6868   }
6869   case ARM::t2STMDB_UPD: {
6870     // If this is a store of a single register, then we should use
6871     // a pre-indexed STR instruction instead, per the ARM ARM.
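    // E.g., a Thumb2 'stmdb r2!, {r3}' is emitted as 'str r3, [r2, #-4]!'.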
6872     if (Inst.getNumOperands() != 5)
6873       return false;
6874     MCInst TmpInst;
6875     TmpInst.setOpcode(ARM::t2STR_PRE);
6876     TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6877     TmpInst.addOperand(Inst.getOperand(4)); // Rt
6878     TmpInst.addOperand(Inst.getOperand(1)); // Rn
6879     TmpInst.addOperand(MCOperand::CreateImm(-4));
6880     TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6881     TmpInst.addOperand(Inst.getOperand(3));
6882     Inst = TmpInst;
6883     return true;
6884   }
6885   case ARM::LDMIA_UPD:
6886     // If this is a load of a single register via a 'pop', then we should use
6887     // a post-indexed LDR instruction instead, per the ARM ARM.
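    // E.g., an ARM mode 'pop {r3}' is emitted as 'ldr r3, [sp], #4'.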
6888     if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6889         Inst.getNumOperands() == 5) {
6890       MCInst TmpInst;
6891       TmpInst.setOpcode(ARM::LDR_POST_IMM);
6892       TmpInst.addOperand(Inst.getOperand(4)); // Rt
6893       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6894       TmpInst.addOperand(Inst.getOperand(1)); // Rn
6895       TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6896       TmpInst.addOperand(MCOperand::CreateImm(4));
6897       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6898       TmpInst.addOperand(Inst.getOperand(3));
6899       Inst = TmpInst;
6900       return true;
6901     }
6902     break;
6903   case ARM::STMDB_UPD:
6904     // If this is a store of a single register via a 'push', then we should use
6905     // a pre-indexed STR instruction instead, per the ARM ARM.
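    // E.g., an ARM mode 'push {r3}' is emitted as 'str r3, [sp, #-4]!'.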
6906     if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6907         Inst.getNumOperands() == 5) {
6908       MCInst TmpInst;
6909       TmpInst.setOpcode(ARM::STR_PRE_IMM);
6910       TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6911       TmpInst.addOperand(Inst.getOperand(4)); // Rt
6912       TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6913       TmpInst.addOperand(MCOperand::CreateImm(-4));
6914       TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6915       TmpInst.addOperand(Inst.getOperand(3));
6916       Inst = TmpInst;
6917     }
6918     break;
6919   case ARM::t2ADDri12:
6920     // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6921     // mnemonic was used (not "addw"), encoding T3 is preferred.
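    // E.g., 'add r0, r1, #12' fits as a modified immediate, so it is encoded
    // as t2ADDri rather than as the 'addw' (t2ADDri12) form.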
6922     if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6923         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6924       break;
6925     Inst.setOpcode(ARM::t2ADDri);
6926     Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6927     break;
6928   case ARM::t2SUBri12:
6929     // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6930     // mnemonic was used (not "subw"), encoding T3 is preferred.
6931     if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6932         ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6933       break;
6934     Inst.setOpcode(ARM::t2SUBri);
6935     Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6936     break;
6937   case ARM::tADDi8:
6938     // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6939     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6940     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6941     // to encoding T1 if <Rd> is omitted."
6942     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6943       Inst.setOpcode(ARM::tADDi3);
6944       return true;
6945     }
6946     break;
6947   case ARM::tSUBi8:
6948     // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6949     // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6950     // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6951     // to encoding T1 if <Rd> is omitted."
6952     if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6953       Inst.setOpcode(ARM::tSUBi3);
6954       return true;
6955     }
6956     break;
6957   case ARM::t2ADDri:
6958   case ARM::t2SUBri: {
6959     // If the destination and first source operand are the same, and
6960     // the flags are compatible with the current IT status, use encoding T2
6961     // instead of T3, for compatibility with the system 'as'. Make sure the
6962     // wide encoding wasn't explicit.
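    // E.g., 'adds r1, r1, #200' outside an IT block can use the 16-bit
    // tADDi8 encoding here.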
6963     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6964         !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6965         (unsigned)Inst.getOperand(2).getImm() > 255 ||
6966         ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6967         (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6968         (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6969          static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6970       break;
6971     MCInst TmpInst;
6972     TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6973                       ARM::tADDi8 : ARM::tSUBi8);
6974     TmpInst.addOperand(Inst.getOperand(0));
6975     TmpInst.addOperand(Inst.getOperand(5));
6976     TmpInst.addOperand(Inst.getOperand(0));
6977     TmpInst.addOperand(Inst.getOperand(2));
6978     TmpInst.addOperand(Inst.getOperand(3));
6979     TmpInst.addOperand(Inst.getOperand(4));
6980     Inst = TmpInst;
6981     return true;
6982   }
6983   case ARM::t2ADDrr: {
6984     // If the destination and first source operand are the same, and
6985     // there's no setting of the flags, use encoding T2 instead of T3.
6986     // Note that this is only for ADD, not SUB. This mirrors the system
6987     // 'as' behaviour. Make sure the wide encoding wasn't explicit.
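    // E.g., 'add r0, r0, r9' with no flag setting narrows to the 16-bit
    // tADDhirr encoding.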
6988     if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6989         Inst.getOperand(5).getReg() != 0 ||
6990         (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6991          static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6992       break;
6993     MCInst TmpInst;
6994     TmpInst.setOpcode(ARM::tADDhirr);
6995     TmpInst.addOperand(Inst.getOperand(0));
6996     TmpInst.addOperand(Inst.getOperand(0));
6997     TmpInst.addOperand(Inst.getOperand(2));
6998     TmpInst.addOperand(Inst.getOperand(3));
6999     TmpInst.addOperand(Inst.getOperand(4));
7000     Inst = TmpInst;
7001     return true;
7002   }
7003   case ARM::tADDrSP: {
7004     // If the non-SP source operand and the destination operand are not the
7005     // same, we need to use the 32-bit encoding if it's available.
7006     if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7007       Inst.setOpcode(ARM::t2ADDrr);
7008       Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7009       return true;
7010     }
7011     break;
7012   }
7013   case ARM::tB:
7014     // A Thumb conditional branch outside of an IT block is a tBcc.
7015     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7016       Inst.setOpcode(ARM::tBcc);
7017       return true;
7018     }
7019     break;
7020   case ARM::t2B:
7021     // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7022     if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7023       Inst.setOpcode(ARM::t2Bcc);
7024       return true;
7025     }
7026     break;
7027   case ARM::t2Bcc:
7028     // If the conditional is AL or we're in an IT block, we really want t2B.
7029     if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7030       Inst.setOpcode(ARM::t2B);
7031       return true;
7032     }
7033     break;
7034   case ARM::tBcc:
7035     // If the conditional is AL, we really want tB.
7036     if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7037       Inst.setOpcode(ARM::tB);
7038       return true;
7039     }
7040     break;
7041   case ARM::tLDMIA: {
7042     // If the register list contains any high registers, or if the writeback
7043     // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7044     // instead if we're in Thumb2. Otherwise, this should have generated
7045     // an error in validateInstruction().
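    // E.g., 'ldmia r0, {r1, r8}' has a high register in the list, so it must
    // use the 32-bit t2LDMIA encoding.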
7046     unsigned Rn = Inst.getOperand(0).getReg();
7047     bool hasWritebackToken =
7048       (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7049        static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7050     bool listContainsBase;
7051     if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7052         (!listContainsBase && !hasWritebackToken) ||
7053         (listContainsBase && hasWritebackToken)) {
7054       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7055       assert (isThumbTwo());
7056       Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7057       // If we're switching to the updating version, we need to insert
7058       // the writeback tied operand.
7059       if (hasWritebackToken)
7060         Inst.insert(Inst.begin(),
7061                     MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7062       return true;
7063     }
7064     break;
7065   }
7066   case ARM::tSTMIA_UPD: {
7067     // If the register list contains any high registers, we need to use
7068     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7069     // should have generated an error in validateInstruction().
7070     unsigned Rn = Inst.getOperand(0).getReg();
7071     bool listContainsBase;
7072     if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7073       // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7074       assert (isThumbTwo());
7075       Inst.setOpcode(ARM::t2STMIA_UPD);
7076       return true;
7077     }
7078     break;
7079   }
7080   case ARM::tPOP: {
7081     bool listContainsBase;
7082     // If the register list contains any high registers, we need to use
7083     // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7084     // should have generated an error in validateInstruction().
7085     if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7086       return false;
7087     assert (isThumbTwo());
7088     Inst.setOpcode(ARM::t2LDMIA_UPD);
7089     // Add the base register and writeback operands.
7090     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7091     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7092     return true;
7093   }
7094   case ARM::tPUSH: {
7095     bool listContainsBase;
7096     if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7097       return false;
7098     assert (isThumbTwo());
7099     Inst.setOpcode(ARM::t2STMDB_UPD);
7100     // Add the base register and writeback operands.
7101     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7102     Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7103     return true;
7104   }
7105   case ARM::t2MOVi: {
7106     // If we can use the 16-bit encoding and the user didn't explicitly
7107     // request the 32-bit variant, transform it here.
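    // E.g., 'movs r0, #1' outside an IT block becomes the 16-bit tMOVi8.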
7108     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7109         (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7110         ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7111          Inst.getOperand(4).getReg() == ARM::CPSR) ||
7112         (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7113         (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7114          static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7115       // The operands aren't in the same order for tMOVi8...
7116       MCInst TmpInst;
7117       TmpInst.setOpcode(ARM::tMOVi8);
7118       TmpInst.addOperand(Inst.getOperand(0));
7119       TmpInst.addOperand(Inst.getOperand(4));
7120       TmpInst.addOperand(Inst.getOperand(1));
7121       TmpInst.addOperand(Inst.getOperand(2));
7122       TmpInst.addOperand(Inst.getOperand(3));
7123       Inst = TmpInst;
7124       return true;
7125     }
7126     break;
7127   }
7128   case ARM::t2MOVr: {
7129     // If we can use the 16-bit encoding and the user didn't explicitly
7130     // request the 32-bit variant, transform it here.
7131     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7132         isARMLowRegister(Inst.getOperand(1).getReg()) &&
7133         Inst.getOperand(2).getImm() == ARMCC::AL &&
7134         Inst.getOperand(4).getReg() == ARM::CPSR &&
7135         (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7136          static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7137       // The operands aren't the same for tMOV[S]r... (no cc_out)
7138       MCInst TmpInst;
7139       TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7140       TmpInst.addOperand(Inst.getOperand(0));
7141       TmpInst.addOperand(Inst.getOperand(1));
7142       TmpInst.addOperand(Inst.getOperand(2));
7143       TmpInst.addOperand(Inst.getOperand(3));
7144       Inst = TmpInst;
7145       return true;
7146     }
7147     break;
7148   }
7149   case ARM::t2SXTH:
7150   case ARM::t2SXTB:
7151   case ARM::t2UXTH:
7152   case ARM::t2UXTB: {
7153     // If we can use the 16-bit encoding and the user didn't explicitly
7154     // request the 32-bit variant, transform it here.
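    // E.g., 'sxth r0, r1' with no rotation narrows to the 16-bit tSXTH.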
7155     if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7156         isARMLowRegister(Inst.getOperand(1).getReg()) &&
7157         Inst.getOperand(2).getImm() == 0 &&
7158         (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7159          static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7160       unsigned NewOpc;
7161       switch (Inst.getOpcode()) {
7162       default: llvm_unreachable("Illegal opcode!");
7163       case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7164       case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7165       case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7166       case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7167       }
7168       // The operands aren't the same for thumb1 (no rotate operand).
7169       MCInst TmpInst;
7170       TmpInst.setOpcode(NewOpc);
7171       TmpInst.addOperand(Inst.getOperand(0));
7172       TmpInst.addOperand(Inst.getOperand(1));
7173       TmpInst.addOperand(Inst.getOperand(3));
7174       TmpInst.addOperand(Inst.getOperand(4));
7175       Inst = TmpInst;
7176       return true;
7177     }
7178     break;
7179   }
7180   case ARM::MOVsi: {
7181     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7182     // rrx shifts, and asr/lsr by #32, are encoded with a shift amount of 0.
7183     if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7184       return false;
7185     if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7186       // Shifting by zero is accepted as a vanilla 'MOVr'
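      // E.g., 'mov r0, r1, lsl #0' is emitted as plain 'mov r0, r1'.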
7187       MCInst TmpInst;
7188       TmpInst.setOpcode(ARM::MOVr);
7189       TmpInst.addOperand(Inst.getOperand(0));
7190       TmpInst.addOperand(Inst.getOperand(1));
7191       TmpInst.addOperand(Inst.getOperand(3));
7192       TmpInst.addOperand(Inst.getOperand(4));
7193       TmpInst.addOperand(Inst.getOperand(5));
7194       Inst = TmpInst;
7195       return true;
7196     }
7197     return false;
7198   }
7199   case ARM::ANDrsi:
7200   case ARM::ORRrsi:
7201   case ARM::EORrsi:
7202   case ARM::BICrsi:
7203   case ARM::SUBrsi:
7204   case ARM::ADDrsi: {
7205     unsigned newOpc;
7206     ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7207     if (SOpc == ARM_AM::rrx) return false;
7208     switch (Inst.getOpcode()) {
7209     default: llvm_unreachable("unexpected opcode!");
7210     case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7211     case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7212     case ARM::EORrsi: newOpc = ARM::EORrr; break;
7213     case ARM::BICrsi: newOpc = ARM::BICrr; break;
7214     case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7215     case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7216     }
7217     // If the shift is by zero, use the non-shifted instruction definition.
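    // E.g., 'and r0, r1, r2, lsl #0' is emitted as plain 'and r0, r1, r2'.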
7218     if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7219       MCInst TmpInst;
7220       TmpInst.setOpcode(newOpc);
7221       TmpInst.addOperand(Inst.getOperand(0));
7222       TmpInst.addOperand(Inst.getOperand(1));
7223       TmpInst.addOperand(Inst.getOperand(2));
7224       TmpInst.addOperand(Inst.getOperand(4));
7225       TmpInst.addOperand(Inst.getOperand(5));
7226       TmpInst.addOperand(Inst.getOperand(6));
7227       Inst = TmpInst;
7228       return true;
7229     }
7230     return false;
7231   }
7232   case ARM::ITasm:
7233   case ARM::t2IT: {
7234     // In the encoding, the mask bit for each instruction after the first
7235     // indicates 't' when it equals the low bit of the condition code.
7236     // The parser always uses 1 for 't', so XOR-toggle the bits if the low
7237     // bit of the condition code is zero.
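    // E.g., for 'itte eq' the parsed mask is 0b1010 (t, e, then the
    // terminating 1); since EQ has a zero low bit it becomes 0b0110 here.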
7238     MCOperand &MO = Inst.getOperand(1);
7239     unsigned Mask = MO.getImm();
7240     unsigned OrigMask = Mask;
7241     unsigned TZ = CountTrailingZeros_32(Mask);
7242     if ((Inst.getOperand(0).getImm() & 1) == 0) {
7243       assert(Mask && TZ <= 3 && "illegal IT mask value!");
7244       for (unsigned i = 3; i != TZ; --i)
7245         Mask ^= 1 << i;
7246     }
7247     MO.setImm(Mask);
7248 
7249     // Set up the IT block state according to the IT instruction we just
7250     // matched.
7251     assert(!inITBlock() && "nested IT blocks?!");
7252     ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7253     ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7254     ITState.CurPosition = 0;
7255     ITState.FirstCond = true;
7256     break;
7257   }
7258   }
7259   return false;
7260 }
7261 
7262 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7263   // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
7264   // suffix depending on whether they're in an IT block or not.
7265   unsigned Opc = Inst.getOpcode();
7266   const MCInstrDesc &MCID = getInstDesc(Opc);
7267   if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7268     assert(MCID.hasOptionalDef() &&
7269            "optionally flag setting instruction missing optional def operand");
7270     assert(MCID.NumOperands == Inst.getNumOperands() &&
7271            "operand count mismatch!");
7272     // Find the optional-def operand (cc_out).
7273     unsigned OpNo;
7274     for (OpNo = 0;
7275          OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7276          ++OpNo)
7277       ;
7278     // If we're parsing Thumb1, reject it completely.
7279     if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7280       return Match_MnemonicFail;
7281     // If we're parsing Thumb2, which form is legal depends on whether we're
7282     // in an IT block.
7283     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7284         !inITBlock())
7285       return Match_RequiresITBlock;
7286     if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7287         inITBlock())
7288       return Match_RequiresNotITBlock;
7289   }
7290   // Some high-register supporting Thumb1 encodings only allow both registers
7291   // to be from r0-r7 when in Thumb2.
7292   else if (Opc == ARM::tADDhirr && isThumbOne() &&
7293            isARMLowRegister(Inst.getOperand(1).getReg()) &&
7294            isARMLowRegister(Inst.getOperand(2).getReg()))
7295     return Match_RequiresThumb2;
7296   // Others only require ARMv6 or later.
7297   else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7298            isARMLowRegister(Inst.getOperand(0).getReg()) &&
7299            isARMLowRegister(Inst.getOperand(1).getReg()))
7300     return Match_RequiresV6;
7301   return Match_Success;
7302 }
7303 
7304 static const char *getSubtargetFeatureName(unsigned Val);
7305 bool ARMAsmParser::
7306 MatchAndEmitInstruction(SMLoc IDLoc,
7307                         SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7308                         MCStreamer &Out) {
7309   MCInst Inst;
7310   unsigned ErrorInfo;
7311   unsigned MatchResult;
7312   MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7313   switch (MatchResult) {
7314   default: break;
7315   case Match_Success:
7316     // Context sensitive operand constraints aren't handled by the matcher,
7317     // so check them here.
7318     if (validateInstruction(Inst, Operands)) {
7319       // Still progress the IT block, otherwise one wrong condition causes
7320       // nasty cascading errors.
7321       forwardITPosition();
7322       return true;
7323     }
7324 
7325     // Some instructions need post-processing to, for example, tweak which
7326     // encoding is selected. Loop on it while changes happen so the
7327     // individual transformations can chain off each other. E.g.,
7328     // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7329     while (processInstruction(Inst, Operands))
7330       ;
7331 
7332     // Only move forward at the very end so that everything in validate
7333     // and process gets a consistent answer about whether we're in an IT
7334     // block.
7335     forwardITPosition();
7336 
7337     // ITasm is an ARM mode pseudo-instruction that just sets the IT block
7338     // state and doesn't actually encode to an instruction.
7339     if (Inst.getOpcode() == ARM::ITasm)
7340       return false;
7341 
7342     Inst.setLoc(IDLoc);
7343     Out.EmitInstruction(Inst);
7344     return false;
7345   case Match_MissingFeature: {
7346     assert(ErrorInfo && "Unknown missing feature!");
7347     // Special case the error message for the very common case where only
7348     // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7349     std::string Msg = "instruction requires:";
7350     unsigned Mask = 1;
7351     for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7352       if (ErrorInfo & Mask) {
7353         Msg += " ";
7354         Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7355       }
7356       Mask <<= 1;
7357     }
7358     return Error(IDLoc, Msg);
7359   }
7360   case Match_InvalidOperand: {
7361     SMLoc ErrorLoc = IDLoc;
7362     if (ErrorInfo != ~0U) {
7363       if (ErrorInfo >= Operands.size())
7364         return Error(IDLoc, "too few operands for instruction");
7365 
7366       ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7367       if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7368     }
7369 
7370     return Error(ErrorLoc, "invalid operand for instruction");
7371   }
7372   case Match_MnemonicFail:
7373     return Error(IDLoc, "invalid instruction",
7374                  ((ARMOperand*)Operands[0])->getLocRange());
7375   case Match_ConversionFail:
7376     // The converter function will have already emitted a diagnostic.
7377     return true;
7378   case Match_RequiresNotITBlock:
7379     return Error(IDLoc, "flag setting instruction only valid outside IT block");
7380   case Match_RequiresITBlock:
7381     return Error(IDLoc, "instruction only valid inside IT block");
7382   case Match_RequiresV6:
7383     return Error(IDLoc, "instruction variant requires ARMv6 or later");
7384   case Match_RequiresThumb2:
7385     return Error(IDLoc, "instruction variant requires Thumb2");
7386   }
7387 
7388   llvm_unreachable("Implement any new match types added!");
7389 }
7390 
7391 /// ParseDirective parses the ARM-specific directives.
7392 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7393   StringRef IDVal = DirectiveID.getIdentifier();
7394   if (IDVal == ".word")
7395     return parseDirectiveWord(4, DirectiveID.getLoc());
7396   else if (IDVal == ".thumb")
7397     return parseDirectiveThumb(DirectiveID.getLoc());
7398   else if (IDVal == ".arm")
7399     return parseDirectiveARM(DirectiveID.getLoc());
7400   else if (IDVal == ".thumb_func")
7401     return parseDirectiveThumbFunc(DirectiveID.getLoc());
7402   else if (IDVal == ".code")
7403     return parseDirectiveCode(DirectiveID.getLoc());
7404   else if (IDVal == ".syntax")
7405     return parseDirectiveSyntax(DirectiveID.getLoc());
7406   else if (IDVal == ".unreq")
7407     return parseDirectiveUnreq(DirectiveID.getLoc());
7408   else if (IDVal == ".arch")
7409     return parseDirectiveArch(DirectiveID.getLoc());
7410   else if (IDVal == ".eabi_attribute")
7411     return parseDirectiveEabiAttr(DirectiveID.getLoc());
7412   return true;
7413 }
7414 
7415 /// parseDirectiveWord
7416 ///  ::= .word [ expression (, expression)* ]
7417 bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7418   if (getLexer().isNot(AsmToken::EndOfStatement)) {
7419     for (;;) {
7420       const MCExpr *Value;
7421       if (getParser().ParseExpression(Value))
7422         return true;
7423 
7424       getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7425 
7426       if (getLexer().is(AsmToken::EndOfStatement))
7427         break;
7428 
7429       // FIXME: Improve diagnostic.
7430       if (getLexer().isNot(AsmToken::Comma))
7431         return Error(L, "unexpected token in directive");
7432       Parser.Lex();
7433     }
7434   }
7435 
7436   Parser.Lex();
7437   return false;
7438 }
7439 
7440 /// parseDirectiveThumb
7441 ///  ::= .thumb
7442 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7443   if (getLexer().isNot(AsmToken::EndOfStatement))
7444     return Error(L, "unexpected token in directive");
7445   Parser.Lex();
7446 
7447   if (!isThumb())
7448     SwitchMode();
7449   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7450   return false;
7451 }
7452 
7453 /// parseDirectiveARM
7454 ///  ::= .arm
7455 bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7456   if (getLexer().isNot(AsmToken::EndOfStatement))
7457     return Error(L, "unexpected token in directive");
7458   Parser.Lex();
7459 
7460   if (isThumb())
7461     SwitchMode();
7462   getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7463   return false;
7464 }
7465 
7466 /// parseDirectiveThumbFunc
7467 ///  ::= .thumb_func symbol_name
7468 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7469   const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7470   bool isMachO = MAI.hasSubsectionsViaSymbols();
7471   StringRef Name;
7472   bool needFuncName = true;
7473 
7474   // Darwin asm syntax optionally allows a function name after the
7475   // .thumb_func directive; ELF does not.
7476   if (isMachO) {
7477     const AsmToken &Tok = Parser.getTok();
7478     if (Tok.isNot(AsmToken::EndOfStatement)) {
7479       if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7480         return Error(L, "unexpected token in .thumb_func directive");
7481       Name = Tok.getIdentifier();
7482       Parser.Lex(); // Consume the identifier token.
7483       needFuncName = false;
7484     }
7485   }
7486 
7487   if (getLexer().isNot(AsmToken::EndOfStatement))
7488     return Error(L, "unexpected token in directive");
7489 
7490   // Eat the end of statement and any blank lines that follow.
7491   while (getLexer().is(AsmToken::EndOfStatement))
7492     Parser.Lex();
7493 
7494   // FIXME: assuming function name will be the line following .thumb_func
7495   // We really should be checking the next symbol definition even if there's
7496   // stuff in between.
7497   if (needFuncName) {
7498     Name = Parser.getTok().getIdentifier();
7499   }
7500 
7501   // Mark symbol as a thumb symbol.
7502   MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7503   getParser().getStreamer().EmitThumbFunc(Func);
7504   return false;
7505 }
7506 
7507 /// parseDirectiveSyntax
7508 ///  ::= .syntax unified | divided
7509 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7510   const AsmToken &Tok = Parser.getTok();
7511   if (Tok.isNot(AsmToken::Identifier))
7512     return Error(L, "unexpected token in .syntax directive");
7513   StringRef Mode = Tok.getString();
7514   if (Mode == "unified" || Mode == "UNIFIED")
7515     Parser.Lex();
7516   else if (Mode == "divided" || Mode == "DIVIDED")
7517     return Error(L, "'.syntax divided' arm assembly not supported");
7518   else
7519     return Error(L, "unrecognized syntax mode in .syntax directive");
7520 
7521   if (getLexer().isNot(AsmToken::EndOfStatement))
7522     return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7523   Parser.Lex();
7524 
7525   // TODO tell the MC streamer the mode
7526   // getParser().getStreamer().Emit???();
7527   return false;
7528 }
7529 
7530 /// parseDirectiveCode
7531 ///  ::= .code 16 | 32
7532 bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7533   const AsmToken &Tok = Parser.getTok();
7534   if (Tok.isNot(AsmToken::Integer))
7535     return Error(L, "unexpected token in .code directive");
7536   int64_t Val = Parser.getTok().getIntVal();
7537   if (Val == 16)
7538     Parser.Lex();
7539   else if (Val == 32)
7540     Parser.Lex();
7541   else
7542     return Error(L, "invalid operand to .code directive");
7543 
7544   if (getLexer().isNot(AsmToken::EndOfStatement))
7545     return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7546   Parser.Lex();
7547 
7548   if (Val == 16) {
7549     if (!isThumb())
7550       SwitchMode();
7551     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7552   } else {
7553     if (isThumb())
7554       SwitchMode();
7555     getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7556   }
7557 
7558   return false;
7559 }
7560 
7561 /// parseDirectiveReq
7562 ///  ::= name .req registername
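///  e.g., 'fp .req r11'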
7563 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7564   Parser.Lex(); // Eat the '.req' token.
7565   unsigned Reg;
7566   SMLoc SRegLoc, ERegLoc;
7567   if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7568     Parser.EatToEndOfStatement();
7569     return Error(SRegLoc, "register name expected");
7570   }
7571 
7572   // Shouldn't be anything else.
7573   if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7574     Parser.EatToEndOfStatement();
7575     return Error(Parser.getTok().getLoc(),
7576                  "unexpected input in .req directive.");
7577   }
7578 
7579   Parser.Lex(); // Consume the EndOfStatement
7580 
7581   if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7582     return Error(SRegLoc, "redefinition of '" + Name +
7583                           "' does not match original.");
7584 
7585   return false;
7586 }
7587 
7588 /// parseDirectiveUnreq
7589 ///  ::= .unreq registername
7590 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7591   if (Parser.getTok().isNot(AsmToken::Identifier)) {
7592     Parser.EatToEndOfStatement();
7593     return Error(L, "unexpected input in .unreq directive.");
7594   }
7595   RegisterReqs.erase(Parser.getTok().getIdentifier());
7596   Parser.Lex(); // Eat the identifier.
7597   return false;
7598 }
7599 
7600 /// parseDirectiveArch
7601 ///  ::= .arch token
7602 bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7603   return true;
7604 }
7605 
7606 /// parseDirectiveEabiAttr
7607 ///  ::= .eabi_attribute int, int
7608 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7609   return true;
7610 }
7611 
7612 extern "C" void LLVMInitializeARMAsmLexer();
7613 
7614 /// Force static initialization.
7615 extern "C" void LLVMInitializeARMAsmParser() {
7616   RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7617   RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7618   LLVMInitializeARMAsmLexer();
7619 }
7620 
7621 #define GET_REGISTER_MATCHER
7622 #define GET_SUBTARGET_FEATURE_NAME
7623 #define GET_MATCHER_IMPLEMENTATION
7624 #include "ARMGenAsmMatcher.inc"
7625