//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARMFeatures.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

class ARMOperand;

// Lane configuration of a parsed NEON vector-list operand:
// no lane suffix, an all-lanes "[]" suffix, or a single "[n]" index.
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. Parses
/// instructions and target directives into MCInsts / streamer calls.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  const MCInstrInfo &MII;
  const MCRegisterInfo *MRI;     // Cached from the MCContext in the ctor.

  ARMTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = getParser().getStreamer().getTargetStreamer();
    return static_cast<ARMTargetStreamer &>(TS);
  }

  // Unwind directives state. Tracks the locations of the unwinding-related
  // directives seen so far (for diagnostics) and the frame-pointer register
  // established by .setfp (-1 when none).
  SMLoc FnStartLoc;
  SMLoc CantUnwindLoc;
  SMLoc PersonalityLoc;
  SMLoc HandlerDataLoc;
  int FPReg;
  // Reset all unwind directive state back to "no directives seen".
  void resetUnwindDirectiveParserState() {
    FnStartLoc = SMLoc();
    CantUnwindLoc = SMLoc();
    PersonalityLoc = SMLoc();
    HandlerDataLoc = SMLoc();
    FPReg = -1;
  }

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  // Advance to the next slot of the active IT block, if any; when the last
  // slot is consumed the block is marked done (CurPosition = ~0U).
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = countTrailingZeros(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostic helpers forwarding to the generic parser.
  bool Warning(SMLoc L, const Twine &Msg,
               ArrayRef<SMRange> Ranges = None) {
    return Parser.Warning(L, Msg, Ranges);
  }
  bool Error(SMLoc L, const Twine &Msg,
             ArrayRef<SMRange> Ranges = None) {
    return Parser.Error(L, Msg, Ranges);
  }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Target directive handlers; each returns true on error.
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);
  bool parseDirectiveFnStart(SMLoc L);
  bool parseDirectiveFnEnd(SMLoc L);
  bool parseDirectiveCantUnwind(SMLoc L);
  bool parseDirectivePersonality(SMLoc L);
  bool parseDirectiveHandlerData(SMLoc L);
  bool parseDirectiveSetFP(SMLoc L);
  bool parseDirectivePad(SMLoc L);
  bool parseDirectiveRegSave(SMLoc L, bool IsVector);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
                             bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature predicates, derived from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasThumb() const {
    return STI.getFeatureBits() & ARM::HasV4TOps;
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV6MOps() const {
    return STI.getFeatureBits() & ARM::HasV6MOps;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  bool hasV8Ops() const {
    return STI.getFeatureBits() & ARM::HasV8Ops;
  }
  bool hasARM() const {
    return !(STI.getFeatureBits() & ARM::FeatureNoARM);
  }

  // Toggle between ARM and Thumb mode and recompute available features.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers invoked by the auto-generated matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseInstSyncBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
                                       SMLoc &EndLoc);

  // Asm Match Converter Methods
  void cvtThumbMultiply(MCInst &Inst,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  void cvtThumbBranches(MCInst &Inst,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool shouldOmitPredicateOperand(StringRef Mnemonic,
                                  SmallVectorImpl<MCParsedAsmOperand*> &Operands);
public:
  // Target-specific match result codes, continuing after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "ARMGenAsmMatcher.inc"

  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser,
               const MCInstrInfo &MII)
      : MCTargetAsmParser(), STI(_STI), Parser(_Parser), MII(MII), FPReg(-1) {
    MCAsmParserExtension::Initialize(_Parser);

    // Cache the MCRegisterInfo.
    MRI = getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out, unsigned &ErrorInfo,
                               bool MatchingInlineAsm);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// operand.
294 class ARMOperand : public MCParsedAsmOperand { 295 enum KindTy { 296 k_CondCode, 297 k_CCOut, 298 k_ITCondMask, 299 k_CoprocNum, 300 k_CoprocReg, 301 k_CoprocOption, 302 k_Immediate, 303 k_MemBarrierOpt, 304 k_InstSyncBarrierOpt, 305 k_Memory, 306 k_PostIndexRegister, 307 k_MSRMask, 308 k_ProcIFlags, 309 k_VectorIndex, 310 k_Register, 311 k_RegisterList, 312 k_DPRRegisterList, 313 k_SPRRegisterList, 314 k_VectorList, 315 k_VectorListAllLanes, 316 k_VectorListIndexed, 317 k_ShiftedRegister, 318 k_ShiftedImmediate, 319 k_ShifterImmediate, 320 k_RotateImmediate, 321 k_BitfieldDescriptor, 322 k_Token 323 } Kind; 324 325 SMLoc StartLoc, EndLoc; 326 SmallVector<unsigned, 8> Registers; 327 328 struct CCOp { 329 ARMCC::CondCodes Val; 330 }; 331 332 struct CopOp { 333 unsigned Val; 334 }; 335 336 struct CoprocOptionOp { 337 unsigned Val; 338 }; 339 340 struct ITMaskOp { 341 unsigned Mask:4; 342 }; 343 344 struct MBOptOp { 345 ARM_MB::MemBOpt Val; 346 }; 347 348 struct ISBOptOp { 349 ARM_ISB::InstSyncBOpt Val; 350 }; 351 352 struct IFlagsOp { 353 ARM_PROC::IFlags Val; 354 }; 355 356 struct MMaskOp { 357 unsigned Val; 358 }; 359 360 struct TokOp { 361 const char *Data; 362 unsigned Length; 363 }; 364 365 struct RegOp { 366 unsigned RegNum; 367 }; 368 369 // A vector register list is a sequential list of 1 to 4 registers. 370 struct VectorListOp { 371 unsigned RegNum; 372 unsigned Count; 373 unsigned LaneIndex; 374 bool isDoubleSpaced; 375 }; 376 377 struct VectorIndexOp { 378 unsigned Val; 379 }; 380 381 struct ImmOp { 382 const MCExpr *Val; 383 }; 384 385 /// Combined record for all forms of ARM address expressions. 386 struct MemoryOp { 387 unsigned BaseRegNum; 388 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 389 // was specified. 
390 const MCConstantExpr *OffsetImm; // Offset immediate value 391 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 392 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 393 unsigned ShiftImm; // shift for OffsetReg. 394 unsigned Alignment; // 0 = no alignment specified 395 // n = alignment in bytes (2, 4, 8, 16, or 32) 396 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 397 }; 398 399 struct PostIdxRegOp { 400 unsigned RegNum; 401 bool isAdd; 402 ARM_AM::ShiftOpc ShiftTy; 403 unsigned ShiftImm; 404 }; 405 406 struct ShifterImmOp { 407 bool isASR; 408 unsigned Imm; 409 }; 410 411 struct RegShiftedRegOp { 412 ARM_AM::ShiftOpc ShiftTy; 413 unsigned SrcReg; 414 unsigned ShiftReg; 415 unsigned ShiftImm; 416 }; 417 418 struct RegShiftedImmOp { 419 ARM_AM::ShiftOpc ShiftTy; 420 unsigned SrcReg; 421 unsigned ShiftImm; 422 }; 423 424 struct RotImmOp { 425 unsigned Imm; 426 }; 427 428 struct BitfieldOp { 429 unsigned LSB; 430 unsigned Width; 431 }; 432 433 union { 434 struct CCOp CC; 435 struct CopOp Cop; 436 struct CoprocOptionOp CoprocOption; 437 struct MBOptOp MBOpt; 438 struct ISBOptOp ISBOpt; 439 struct ITMaskOp ITMask; 440 struct IFlagsOp IFlags; 441 struct MMaskOp MMask; 442 struct TokOp Tok; 443 struct RegOp Reg; 444 struct VectorListOp VectorList; 445 struct VectorIndexOp VectorIndex; 446 struct ImmOp Imm; 447 struct MemoryOp Memory; 448 struct PostIdxRegOp PostIdxReg; 449 struct ShifterImmOp ShifterImm; 450 struct RegShiftedRegOp RegShiftedReg; 451 struct RegShiftedImmOp RegShiftedImm; 452 struct RotImmOp RotImm; 453 struct BitfieldOp Bitfield; 454 }; 455 456 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 457 public: 458 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 459 Kind = o.Kind; 460 StartLoc = o.StartLoc; 461 EndLoc = o.EndLoc; 462 switch (Kind) { 463 case k_CondCode: 464 CC = o.CC; 465 break; 466 case k_ITCondMask: 467 ITMask = o.ITMask; 468 break; 469 case k_Token: 470 Tok = o.Tok; 471 break; 472 
case k_CCOut: 473 case k_Register: 474 Reg = o.Reg; 475 break; 476 case k_RegisterList: 477 case k_DPRRegisterList: 478 case k_SPRRegisterList: 479 Registers = o.Registers; 480 break; 481 case k_VectorList: 482 case k_VectorListAllLanes: 483 case k_VectorListIndexed: 484 VectorList = o.VectorList; 485 break; 486 case k_CoprocNum: 487 case k_CoprocReg: 488 Cop = o.Cop; 489 break; 490 case k_CoprocOption: 491 CoprocOption = o.CoprocOption; 492 break; 493 case k_Immediate: 494 Imm = o.Imm; 495 break; 496 case k_MemBarrierOpt: 497 MBOpt = o.MBOpt; 498 break; 499 case k_InstSyncBarrierOpt: 500 ISBOpt = o.ISBOpt; 501 case k_Memory: 502 Memory = o.Memory; 503 break; 504 case k_PostIndexRegister: 505 PostIdxReg = o.PostIdxReg; 506 break; 507 case k_MSRMask: 508 MMask = o.MMask; 509 break; 510 case k_ProcIFlags: 511 IFlags = o.IFlags; 512 break; 513 case k_ShifterImmediate: 514 ShifterImm = o.ShifterImm; 515 break; 516 case k_ShiftedRegister: 517 RegShiftedReg = o.RegShiftedReg; 518 break; 519 case k_ShiftedImmediate: 520 RegShiftedImm = o.RegShiftedImm; 521 break; 522 case k_RotateImmediate: 523 RotImm = o.RotImm; 524 break; 525 case k_BitfieldDescriptor: 526 Bitfield = o.Bitfield; 527 break; 528 case k_VectorIndex: 529 VectorIndex = o.VectorIndex; 530 break; 531 } 532 } 533 534 /// getStartLoc - Get the location of the first token of this operand. 535 SMLoc getStartLoc() const { return StartLoc; } 536 /// getEndLoc - Get the location of the last token of this operand. 537 SMLoc getEndLoc() const { return EndLoc; } 538 /// getLocRange - Get the range between the first and last token of this 539 /// operand. 
  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

  // Kind-checked accessors for the union payload. Each asserts that the
  // operand actually is of the requested kind.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
    assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
    return ISBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  // Simple kind predicates used by the auto-generated matcher.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  // checks whether this operand is an unsigned offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isUnsignedOffset() const {
    if (!isImm()) return false;
    // Symbolic offsets are resolved later by a fixup; accept them here.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << width) - 1);
      return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
    }
    return false;
  }
  // checks whether this operand is a signed offset which fits in a field
  // of specified width and scaled by a specific number of bits
  template<unsigned width, unsigned scale>
  bool isSignedOffset() const {
    if (!isImm()) return false;
    // Symbolic offsets are resolved later by a fixup; accept them here.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
      int64_t Val = CE->getValue();
      int64_t Align = 1LL << scale;
      int64_t Max = Align * ((1LL << (width-1)) - 1);
      int64_t Min = -Align * (1LL << (width-1));
      return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
    }
    return false;
  }

  // checks whether this operand is a memory operand computed as an offset
  // applied to PC. the offset may have 8 bits of magnitude and is represented
  // with two bits of shift. textually it may be either [pc, #imm], #imm or
  // relocatable expression...
  bool isThumbMemPC() const {
    int64_t Val = 0;
    if (isImm()) {
      // Symbolic references become fixups; accept them.
      if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
      if (!CE) return false;
      Val = CE->getValue();
    }
    else if (isMem()) {
      // Must be a pure [pc, #imm] form: immediate offset, no offset register.
      if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
      if(Memory.BaseRegNum != ARM::PC) return false;
      Val = Memory.OffsetImm->getValue();
    }
    else return false;
    // 8-bit magnitude scaled by 4: [0, 1020], word-aligned.
    return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
  }
  // True if the immediate is encodable as a VFP single-precision constant.
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
  bool isFBits16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 16;
  }
  bool isFBits32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }
  // Signed 8-bit immediate scaled by 4: multiples of 4 in [-1020, 1020].
  bool isImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_4095() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4096;
  }
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    return Value > 0 && Value < 4096;
  }
  bool isImm0_1() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 2;
  }
  bool isImm0_3() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4;
  }
  bool isImm0_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm0_63() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 64;
  }
  // Exact-value immediates (used e.g. for saturate/shift widths).
  bool isImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 8;
  }
  bool isImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 16;
  }
  bool isImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  // Right-shift amounts: [1, N] for element width N.
  bool isShrImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
  bool isImm1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 8;
  }
  bool isImm1_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 16;
  }
  bool isImm1_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm256_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 256 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  // Thumb shift-right amount: [1, 32].
  bool isImmThumbSR() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isAdrLabel() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, but it can't fit
    // into shift immediate encoding, we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
    else return (isARMSOImm() || isARMSOImmNeg());
  }
  // True if the value is encodable as an ARM modified-immediate (so_imm).
  bool isARMSOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isARMSOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
  bool isARMSOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }
  // True if the value is encodable as a Thumb2 modified-immediate.
  bool isT2SOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(~Value) != -1;
  }
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
  bool isMem() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMem())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isMemPCRelImm12() const {
    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN encodes the special #-0 offset.
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val == INT32_MIN) || (Val > -4096 && Val < 4096);
  }
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // The #-0 offset is encoded as INT32_MIN, and we have to check
    // for this too.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMem() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // TBB: [Rn, Rm] with no shift, no negation, no alignment.
  bool isMemTBB() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // TBH: [Rn, Rm, lsl #1] only.
  bool isMemTBH() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset.
No shifts, negations or any other complicating factors. 1106 if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative || 1107 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 1108 return false; 1109 return isARMLowRegister(Memory.BaseRegNum) && 1110 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 1111 } 1112 bool isMemThumbRIs4() const { 1113 if (!isMem() || Memory.OffsetRegNum != 0 || 1114 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 1115 return false; 1116 // Immediate offset, multiple of 4 in range [0, 124]. 1117 if (!Memory.OffsetImm) return true; 1118 int64_t Val = Memory.OffsetImm->getValue(); 1119 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 1120 } 1121 bool isMemThumbRIs2() const { 1122 if (!isMem() || Memory.OffsetRegNum != 0 || 1123 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 1124 return false; 1125 // Immediate offset, multiple of 4 in range [0, 62]. 1126 if (!Memory.OffsetImm) return true; 1127 int64_t Val = Memory.OffsetImm->getValue(); 1128 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 1129 } 1130 bool isMemThumbRIs1() const { 1131 if (!isMem() || Memory.OffsetRegNum != 0 || 1132 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 1133 return false; 1134 // Immediate offset in range [0, 31]. 1135 if (!Memory.OffsetImm) return true; 1136 int64_t Val = Memory.OffsetImm->getValue(); 1137 return Val >= 0 && Val <= 31; 1138 } 1139 bool isMemThumbSPI() const { 1140 if (!isMem() || Memory.OffsetRegNum != 0 || 1141 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 1142 return false; 1143 // Immediate offset, multiple of 4 in range [0, 1020]. 1144 if (!Memory.OffsetImm) return true; 1145 int64_t Val = Memory.OffsetImm->getValue(); 1146 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 1147 } 1148 bool isMemImm8s4Offset() const { 1149 // If we have an immediate that's not a constant, treat it as a label 1150 // reference needing a fixup. 
If it is a constant, it's something else 1151 // and we reject it. 1152 if (isImm() && !isa<MCConstantExpr>(getImm())) 1153 return true; 1154 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1155 return false; 1156 // Immediate offset a multiple of 4 in range [-1020, 1020]. 1157 if (!Memory.OffsetImm) return true; 1158 int64_t Val = Memory.OffsetImm->getValue(); 1159 // Special case, #-0 is INT32_MIN. 1160 return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN; 1161 } 1162 bool isMemImm0_1020s4Offset() const { 1163 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1164 return false; 1165 // Immediate offset a multiple of 4 in range [0, 1020]. 1166 if (!Memory.OffsetImm) return true; 1167 int64_t Val = Memory.OffsetImm->getValue(); 1168 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 1169 } 1170 bool isMemImm8Offset() const { 1171 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1172 return false; 1173 // Base reg of PC isn't allowed for these encodings. 1174 if (Memory.BaseRegNum == ARM::PC) return false; 1175 // Immediate offset in range [-255, 255]. 1176 if (!Memory.OffsetImm) return true; 1177 int64_t Val = Memory.OffsetImm->getValue(); 1178 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 1179 } 1180 bool isMemPosImm8Offset() const { 1181 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1182 return false; 1183 // Immediate offset in range [0, 255]. 1184 if (!Memory.OffsetImm) return true; 1185 int64_t Val = Memory.OffsetImm->getValue(); 1186 return Val >= 0 && Val < 256; 1187 } 1188 bool isMemNegImm8Offset() const { 1189 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1190 return false; 1191 // Base reg of PC isn't allowed for these encodings. 1192 if (Memory.BaseRegNum == ARM::PC) return false; 1193 // Immediate offset in range [-255, -1]. 
1194 if (!Memory.OffsetImm) return false; 1195 int64_t Val = Memory.OffsetImm->getValue(); 1196 return (Val == INT32_MIN) || (Val > -256 && Val < 0); 1197 } 1198 bool isMemUImm12Offset() const { 1199 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1200 return false; 1201 // Immediate offset in range [0, 4095]. 1202 if (!Memory.OffsetImm) return true; 1203 int64_t Val = Memory.OffsetImm->getValue(); 1204 return (Val >= 0 && Val < 4096); 1205 } 1206 bool isMemImm12Offset() const { 1207 // If we have an immediate that's not a constant, treat it as a label 1208 // reference needing a fixup. If it is a constant, it's something else 1209 // and we reject it. 1210 if (isImm() && !isa<MCConstantExpr>(getImm())) 1211 return true; 1212 1213 if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1214 return false; 1215 // Immediate offset in range [-4095, 4095]. 1216 if (!Memory.OffsetImm) return true; 1217 int64_t Val = Memory.OffsetImm->getValue(); 1218 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 1219 } 1220 bool isPostIdxImm8() const { 1221 if (!isImm()) return false; 1222 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1223 if (!CE) return false; 1224 int64_t Val = CE->getValue(); 1225 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 1226 } 1227 bool isPostIdxImm8s4() const { 1228 if (!isImm()) return false; 1229 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1230 if (!CE) return false; 1231 int64_t Val = CE->getValue(); 1232 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 1233 (Val == INT32_MIN); 1234 } 1235 1236 bool isMSRMask() const { return Kind == k_MSRMask; } 1237 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 1238 1239 // NEON operands. 
1240 bool isSingleSpacedVectorList() const { 1241 return Kind == k_VectorList && !VectorList.isDoubleSpaced; 1242 } 1243 bool isDoubleSpacedVectorList() const { 1244 return Kind == k_VectorList && VectorList.isDoubleSpaced; 1245 } 1246 bool isVecListOneD() const { 1247 if (!isSingleSpacedVectorList()) return false; 1248 return VectorList.Count == 1; 1249 } 1250 1251 bool isVecListDPair() const { 1252 if (!isSingleSpacedVectorList()) return false; 1253 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 1254 .contains(VectorList.RegNum)); 1255 } 1256 1257 bool isVecListThreeD() const { 1258 if (!isSingleSpacedVectorList()) return false; 1259 return VectorList.Count == 3; 1260 } 1261 1262 bool isVecListFourD() const { 1263 if (!isSingleSpacedVectorList()) return false; 1264 return VectorList.Count == 4; 1265 } 1266 1267 bool isVecListDPairSpaced() const { 1268 if (isSingleSpacedVectorList()) return false; 1269 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID] 1270 .contains(VectorList.RegNum)); 1271 } 1272 1273 bool isVecListThreeQ() const { 1274 if (!isDoubleSpacedVectorList()) return false; 1275 return VectorList.Count == 3; 1276 } 1277 1278 bool isVecListFourQ() const { 1279 if (!isDoubleSpacedVectorList()) return false; 1280 return VectorList.Count == 4; 1281 } 1282 1283 bool isSingleSpacedVectorAllLanes() const { 1284 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced; 1285 } 1286 bool isDoubleSpacedVectorAllLanes() const { 1287 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced; 1288 } 1289 bool isVecListOneDAllLanes() const { 1290 if (!isSingleSpacedVectorAllLanes()) return false; 1291 return VectorList.Count == 1; 1292 } 1293 1294 bool isVecListDPairAllLanes() const { 1295 if (!isSingleSpacedVectorAllLanes()) return false; 1296 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 1297 .contains(VectorList.RegNum)); 1298 } 1299 1300 bool isVecListDPairSpacedAllLanes() const { 1301 if (!isDoubleSpacedVectorAllLanes()) return false; 
1302 return VectorList.Count == 2; 1303 } 1304 1305 bool isVecListThreeDAllLanes() const { 1306 if (!isSingleSpacedVectorAllLanes()) return false; 1307 return VectorList.Count == 3; 1308 } 1309 1310 bool isVecListThreeQAllLanes() const { 1311 if (!isDoubleSpacedVectorAllLanes()) return false; 1312 return VectorList.Count == 3; 1313 } 1314 1315 bool isVecListFourDAllLanes() const { 1316 if (!isSingleSpacedVectorAllLanes()) return false; 1317 return VectorList.Count == 4; 1318 } 1319 1320 bool isVecListFourQAllLanes() const { 1321 if (!isDoubleSpacedVectorAllLanes()) return false; 1322 return VectorList.Count == 4; 1323 } 1324 1325 bool isSingleSpacedVectorIndexed() const { 1326 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced; 1327 } 1328 bool isDoubleSpacedVectorIndexed() const { 1329 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced; 1330 } 1331 bool isVecListOneDByteIndexed() const { 1332 if (!isSingleSpacedVectorIndexed()) return false; 1333 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1334 } 1335 1336 bool isVecListOneDHWordIndexed() const { 1337 if (!isSingleSpacedVectorIndexed()) return false; 1338 return VectorList.Count == 1 && VectorList.LaneIndex <= 3; 1339 } 1340 1341 bool isVecListOneDWordIndexed() const { 1342 if (!isSingleSpacedVectorIndexed()) return false; 1343 return VectorList.Count == 1 && VectorList.LaneIndex <= 1; 1344 } 1345 1346 bool isVecListTwoDByteIndexed() const { 1347 if (!isSingleSpacedVectorIndexed()) return false; 1348 return VectorList.Count == 2 && VectorList.LaneIndex <= 7; 1349 } 1350 1351 bool isVecListTwoDHWordIndexed() const { 1352 if (!isSingleSpacedVectorIndexed()) return false; 1353 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 1354 } 1355 1356 bool isVecListTwoQWordIndexed() const { 1357 if (!isDoubleSpacedVectorIndexed()) return false; 1358 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 1359 } 1360 1361 bool isVecListTwoQHWordIndexed() const { 1362 if 
(!isDoubleSpacedVectorIndexed()) return false; 1363 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 1364 } 1365 1366 bool isVecListTwoDWordIndexed() const { 1367 if (!isSingleSpacedVectorIndexed()) return false; 1368 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 1369 } 1370 1371 bool isVecListThreeDByteIndexed() const { 1372 if (!isSingleSpacedVectorIndexed()) return false; 1373 return VectorList.Count == 3 && VectorList.LaneIndex <= 7; 1374 } 1375 1376 bool isVecListThreeDHWordIndexed() const { 1377 if (!isSingleSpacedVectorIndexed()) return false; 1378 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 1379 } 1380 1381 bool isVecListThreeQWordIndexed() const { 1382 if (!isDoubleSpacedVectorIndexed()) return false; 1383 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 1384 } 1385 1386 bool isVecListThreeQHWordIndexed() const { 1387 if (!isDoubleSpacedVectorIndexed()) return false; 1388 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 1389 } 1390 1391 bool isVecListThreeDWordIndexed() const { 1392 if (!isSingleSpacedVectorIndexed()) return false; 1393 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 1394 } 1395 1396 bool isVecListFourDByteIndexed() const { 1397 if (!isSingleSpacedVectorIndexed()) return false; 1398 return VectorList.Count == 4 && VectorList.LaneIndex <= 7; 1399 } 1400 1401 bool isVecListFourDHWordIndexed() const { 1402 if (!isSingleSpacedVectorIndexed()) return false; 1403 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 1404 } 1405 1406 bool isVecListFourQWordIndexed() const { 1407 if (!isDoubleSpacedVectorIndexed()) return false; 1408 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 1409 } 1410 1411 bool isVecListFourQHWordIndexed() const { 1412 if (!isDoubleSpacedVectorIndexed()) return false; 1413 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 1414 } 1415 1416 bool isVecListFourDWordIndexed() const { 1417 if (!isSingleSpacedVectorIndexed()) return 
false; 1418 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 1419 } 1420 1421 bool isVectorIndex8() const { 1422 if (Kind != k_VectorIndex) return false; 1423 return VectorIndex.Val < 8; 1424 } 1425 bool isVectorIndex16() const { 1426 if (Kind != k_VectorIndex) return false; 1427 return VectorIndex.Val < 4; 1428 } 1429 bool isVectorIndex32() const { 1430 if (Kind != k_VectorIndex) return false; 1431 return VectorIndex.Val < 2; 1432 } 1433 1434 bool isNEONi8splat() const { 1435 if (!isImm()) return false; 1436 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1437 // Must be a constant. 1438 if (!CE) return false; 1439 int64_t Value = CE->getValue(); 1440 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1441 // value. 1442 return Value >= 0 && Value < 256; 1443 } 1444 1445 bool isNEONi16splat() const { 1446 if (!isImm()) return false; 1447 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1448 // Must be a constant. 1449 if (!CE) return false; 1450 int64_t Value = CE->getValue(); 1451 // i16 value in the range [0,255] or [0x0100, 0xff00] 1452 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1453 } 1454 1455 bool isNEONi32splat() const { 1456 if (!isImm()) return false; 1457 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1458 // Must be a constant. 1459 if (!CE) return false; 1460 int64_t Value = CE->getValue(); 1461 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1462 return (Value >= 0 && Value < 256) || 1463 (Value >= 0x0100 && Value <= 0xff00) || 1464 (Value >= 0x010000 && Value <= 0xff0000) || 1465 (Value >= 0x01000000 && Value <= 0xff000000); 1466 } 1467 1468 bool isNEONi32vmov() const { 1469 if (!isImm()) return false; 1470 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1471 // Must be a constant. 
1472 if (!CE) return false; 1473 int64_t Value = CE->getValue(); 1474 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1475 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1476 return (Value >= 0 && Value < 256) || 1477 (Value >= 0x0100 && Value <= 0xff00) || 1478 (Value >= 0x010000 && Value <= 0xff0000) || 1479 (Value >= 0x01000000 && Value <= 0xff000000) || 1480 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1481 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1482 } 1483 bool isNEONi32vmovNeg() const { 1484 if (!isImm()) return false; 1485 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1486 // Must be a constant. 1487 if (!CE) return false; 1488 int64_t Value = ~CE->getValue(); 1489 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1490 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1491 return (Value >= 0 && Value < 256) || 1492 (Value >= 0x0100 && Value <= 0xff00) || 1493 (Value >= 0x010000 && Value <= 0xff0000) || 1494 (Value >= 0x01000000 && Value <= 0xff000000) || 1495 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1496 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1497 } 1498 1499 bool isNEONi64splat() const { 1500 if (!isImm()) return false; 1501 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1502 // Must be a constant. 1503 if (!CE) return false; 1504 uint64_t Value = CE->getValue(); 1505 // i64 value with each byte being either 0 or 0xff. 1506 for (unsigned i = 0; i < 8; ++i) 1507 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1508 return true; 1509 } 1510 1511 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1512 // Add as immediates when possible. Null MCExpr = 0. 
1513 if (Expr == 0) 1514 Inst.addOperand(MCOperand::CreateImm(0)); 1515 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1516 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1517 else 1518 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1519 } 1520 1521 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1522 assert(N == 2 && "Invalid number of operands!"); 1523 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1524 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; 1525 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1526 } 1527 1528 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1529 assert(N == 1 && "Invalid number of operands!"); 1530 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1531 } 1532 1533 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1534 assert(N == 1 && "Invalid number of operands!"); 1535 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1536 } 1537 1538 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1539 assert(N == 1 && "Invalid number of operands!"); 1540 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1541 } 1542 1543 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1544 assert(N == 1 && "Invalid number of operands!"); 1545 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1546 } 1547 1548 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1549 assert(N == 1 && "Invalid number of operands!"); 1550 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1551 } 1552 1553 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1554 assert(N == 1 && "Invalid number of operands!"); 1555 Inst.addOperand(MCOperand::CreateReg(getReg())); 1556 } 1557 1558 void addRegOperands(MCInst &Inst, unsigned N) const { 1559 assert(N == 1 && "Invalid number of operands!"); 1560 Inst.addOperand(MCOperand::CreateReg(getReg())); 1561 } 1562 1563 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1564 assert(N == 3 && 
"Invalid number of operands!"); 1565 assert(isRegShiftedReg() && 1566 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1567 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1568 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1569 Inst.addOperand(MCOperand::CreateImm( 1570 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1571 } 1572 1573 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1574 assert(N == 2 && "Invalid number of operands!"); 1575 assert(isRegShiftedImm() && 1576 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1577 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1578 // Shift of #32 is encoded as 0 where permitted 1579 unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm); 1580 Inst.addOperand(MCOperand::CreateImm( 1581 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm))); 1582 } 1583 1584 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1585 assert(N == 1 && "Invalid number of operands!"); 1586 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1587 ShifterImm.Imm)); 1588 } 1589 1590 void addRegListOperands(MCInst &Inst, unsigned N) const { 1591 assert(N == 1 && "Invalid number of operands!"); 1592 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1593 for (SmallVectorImpl<unsigned>::const_iterator 1594 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1595 Inst.addOperand(MCOperand::CreateReg(*I)); 1596 } 1597 1598 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1599 addRegListOperands(Inst, N); 1600 } 1601 1602 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1603 addRegListOperands(Inst, N); 1604 } 1605 1606 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1607 assert(N == 1 && "Invalid number of operands!"); 1608 // Encoded as val>>3. The printer handles display as 8, 16, 24. 
1609 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1610 } 1611 1612 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1613 assert(N == 1 && "Invalid number of operands!"); 1614 // Munge the lsb/width into a bitfield mask. 1615 unsigned lsb = Bitfield.LSB; 1616 unsigned width = Bitfield.Width; 1617 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1618 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1619 (32 - (lsb + width))); 1620 Inst.addOperand(MCOperand::CreateImm(Mask)); 1621 } 1622 1623 void addImmOperands(MCInst &Inst, unsigned N) const { 1624 assert(N == 1 && "Invalid number of operands!"); 1625 addExpr(Inst, getImm()); 1626 } 1627 1628 void addFBits16Operands(MCInst &Inst, unsigned N) const { 1629 assert(N == 1 && "Invalid number of operands!"); 1630 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1631 Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue())); 1632 } 1633 1634 void addFBits32Operands(MCInst &Inst, unsigned N) const { 1635 assert(N == 1 && "Invalid number of operands!"); 1636 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1637 Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue())); 1638 } 1639 1640 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1641 assert(N == 1 && "Invalid number of operands!"); 1642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1643 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); 1644 Inst.addOperand(MCOperand::CreateImm(Val)); 1645 } 1646 1647 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1648 assert(N == 1 && "Invalid number of operands!"); 1649 // FIXME: We really want to scale the value here, but the LDRD/STRD 1650 // instruction don't encode operands that way yet. 
1651 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1652 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1653 } 1654 1655 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1656 assert(N == 1 && "Invalid number of operands!"); 1657 // The immediate is scaled by four in the encoding and is stored 1658 // in the MCInst as such. Lop off the low two bits here. 1659 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1660 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1661 } 1662 1663 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const { 1664 assert(N == 1 && "Invalid number of operands!"); 1665 // The immediate is scaled by four in the encoding and is stored 1666 // in the MCInst as such. Lop off the low two bits here. 1667 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1668 Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4))); 1669 } 1670 1671 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1672 assert(N == 1 && "Invalid number of operands!"); 1673 // The immediate is scaled by four in the encoding and is stored 1674 // in the MCInst as such. Lop off the low two bits here. 1675 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1676 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1677 } 1678 1679 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1680 assert(N == 1 && "Invalid number of operands!"); 1681 // The constant encodes as the immediate-1, and we store in the instruction 1682 // the bits as encoded, so subtract off one here. 1683 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1684 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1685 } 1686 1687 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1688 assert(N == 1 && "Invalid number of operands!"); 1689 // The constant encodes as the immediate-1, and we store in the instruction 1690 // the bits as encoded, so subtract off one here. 
1691 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1692 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1693 } 1694 1695 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1696 assert(N == 1 && "Invalid number of operands!"); 1697 // The constant encodes as the immediate, except for 32, which encodes as 1698 // zero. 1699 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1700 unsigned Imm = CE->getValue(); 1701 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1702 } 1703 1704 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1705 assert(N == 1 && "Invalid number of operands!"); 1706 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1707 // the instruction as well. 1708 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1709 int Val = CE->getValue(); 1710 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1711 } 1712 1713 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1714 assert(N == 1 && "Invalid number of operands!"); 1715 // The operand is actually a t2_so_imm, but we have its bitwise 1716 // negation in the assembly source, so twiddle it here. 1717 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1718 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1719 } 1720 1721 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { 1722 assert(N == 1 && "Invalid number of operands!"); 1723 // The operand is actually a t2_so_imm, but we have its 1724 // negation in the assembly source, so twiddle it here. 1725 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1726 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1727 } 1728 1729 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const { 1730 assert(N == 1 && "Invalid number of operands!"); 1731 // The operand is actually an imm0_4095, but we have its 1732 // negation in the assembly source, so twiddle it here. 
1733 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1734 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1735 } 1736 1737 void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const { 1738 if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) { 1739 Inst.addOperand(MCOperand::CreateImm(CE->getValue() >> 2)); 1740 return; 1741 } 1742 1743 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); 1744 assert(SR && "Unknown value type!"); 1745 Inst.addOperand(MCOperand::CreateExpr(SR)); 1746 } 1747 1748 void addThumbMemPCOperands(MCInst &Inst, unsigned N) const { 1749 assert(N == 1 && "Invalid number of operands!"); 1750 if (isImm()) { 1751 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1752 if (CE) { 1753 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1754 return; 1755 } 1756 1757 const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); 1758 assert(SR && "Unknown value type!"); 1759 Inst.addOperand(MCOperand::CreateExpr(SR)); 1760 return; 1761 } 1762 1763 assert(isMem() && "Unknown value type!"); 1764 assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!"); 1765 Inst.addOperand(MCOperand::CreateImm(Memory.OffsetImm->getValue())); 1766 } 1767 1768 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1769 assert(N == 1 && "Invalid number of operands!"); 1770 // The operand is actually a so_imm, but we have its bitwise 1771 // negation in the assembly source, so twiddle it here. 1772 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1773 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1774 } 1775 1776 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const { 1777 assert(N == 1 && "Invalid number of operands!"); 1778 // The operand is actually a so_imm, but we have its 1779 // negation in the assembly source, so twiddle it here. 
1780 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1781 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1782 } 1783 1784 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1785 assert(N == 1 && "Invalid number of operands!"); 1786 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1787 } 1788 1789 void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const { 1790 assert(N == 1 && "Invalid number of operands!"); 1791 Inst.addOperand(MCOperand::CreateImm(unsigned(getInstSyncBarrierOpt()))); 1792 } 1793 1794 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1795 assert(N == 1 && "Invalid number of operands!"); 1796 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1797 } 1798 1799 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const { 1800 assert(N == 1 && "Invalid number of operands!"); 1801 int32_t Imm = Memory.OffsetImm->getValue(); 1802 Inst.addOperand(MCOperand::CreateImm(Imm)); 1803 } 1804 1805 void addAdrLabelOperands(MCInst &Inst, unsigned N) const { 1806 assert(N == 1 && "Invalid number of operands!"); 1807 assert(isImm() && "Not an immediate!"); 1808 1809 // If we have an immediate that's not a constant, treat it as a label 1810 // reference needing a fixup. 1811 if (!isa<MCConstantExpr>(getImm())) { 1812 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1813 return; 1814 } 1815 1816 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1817 int Val = CE->getValue(); 1818 Inst.addOperand(MCOperand::CreateImm(Val)); 1819 } 1820 1821 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1822 assert(N == 2 && "Invalid number of operands!"); 1823 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1824 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1825 } 1826 1827 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1828 assert(N == 3 && "Invalid number of operands!"); 1829 int32_t Val = Memory.OffsetImm ? 
                                  Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      // Immediate offset: fold the sign into the AM2 opcode word.
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed addrmode 2 immediate offset; base register is emitted
  // elsewhere, so this only emits a zero register plus the AM2 opcode word.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // Post-indexed register offset form.
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ?
                                  Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Positive imm8 offset form; encoding is identical to the generic imm8.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  // Negative imm8 offset form; encoding is identical to the generic imm8.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // Sign, shift amount and shift type all fold into the AM2 opcode word.
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb reg + imm, offset scaled by 4 (word loads/stores).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb reg + imm, offset scaled by 2 (halfword loads/stores).
  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb reg + imm, unscaled (byte loads/stores).
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb SP-relative, offset scaled by 4.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // #-0 encodes as subtract-zero: isAdd stays clear... wait, isAdd is
    // computed before the INT32_MIN check, so #-0 (INT32_MIN sentinel)
    // keeps isAdd == false with magnitude 0.
    if (Imm == INT32_MIN) Imm = 0;
    // Bit 8 of the encoded value carries the add/sub flag.
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ?
            -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Values that fit in the high byte are shifted down and tagged with
    // the shifted-i16 marker; otherwise the plain-i16 marker is used.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Pick the byte position holding the non-zero byte and tag the value
    // with the matching cmode bits (assumes the value already passed the
    // i32-splat predicate, so exactly one byte is significant).
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // VMOV additionally supports the "ones-filled" forms (0x..ff), chosen
    // when the low byte is non-zero after shifting.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Same as addNEONi32vmovOperands, but the source held the bitwise
    // negation of the value to encode.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = ~CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Each byte of the i64 splat is either all-ones or all-zeros; compress
    // each byte down to one bit and tag with the i64 marker.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods: one per operand kind. Each allocates a new ARMOperand,
  // fills in the kind-specific payload, and records source locations.

  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Regs holds (encoding, register) pairs; the list kind is chosen from the
  // class of the first register, and the list is sorted by encoding.
  static ARMOperand *
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert (Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().second))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().second))
      Kind = k_SPRRegisterList;

    // Sort based on the register encoding values.
    array_pod_sort(Regs.begin(), Regs.end());

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, unsigned> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->second);
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              bool isDoubleSpaced,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index,
                                             bool isDoubleSpaced,
                                             SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // NOTE(review): the Ctx parameter is unused here — presumably kept for
  // signature symmetry with other targets; confirm before removing.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt,
                                              SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_InstSyncBarrierOpt);
    Op->ISBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
ARMOperand *Op = new ARMOperand(k_ProcIFlags); 2471 Op->IFlags.Val = IFlags; 2472 Op->StartLoc = S; 2473 Op->EndLoc = S; 2474 return Op; 2475 } 2476 2477 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { 2478 ARMOperand *Op = new ARMOperand(k_MSRMask); 2479 Op->MMask.Val = MMask; 2480 Op->StartLoc = S; 2481 Op->EndLoc = S; 2482 return Op; 2483 } 2484 }; 2485 2486 } // end anonymous namespace. 2487 2488 void ARMOperand::print(raw_ostream &OS) const { 2489 switch (Kind) { 2490 case k_CondCode: 2491 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 2492 break; 2493 case k_CCOut: 2494 OS << "<ccout " << getReg() << ">"; 2495 break; 2496 case k_ITCondMask: { 2497 static const char *const MaskStr[] = { 2498 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", 2499 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" 2500 }; 2501 assert((ITMask.Mask & 0xf) == ITMask.Mask); 2502 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 2503 break; 2504 } 2505 case k_CoprocNum: 2506 OS << "<coprocessor number: " << getCoproc() << ">"; 2507 break; 2508 case k_CoprocReg: 2509 OS << "<coprocessor register: " << getCoproc() << ">"; 2510 break; 2511 case k_CoprocOption: 2512 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 2513 break; 2514 case k_MSRMask: 2515 OS << "<mask: " << getMSRMask() << ">"; 2516 break; 2517 case k_Immediate: 2518 getImm()->print(OS); 2519 break; 2520 case k_MemBarrierOpt: 2521 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">"; 2522 break; 2523 case k_InstSyncBarrierOpt: 2524 OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">"; 2525 break; 2526 case k_Memory: 2527 OS << "<memory " 2528 << " base:" << Memory.BaseRegNum; 2529 OS << ">"; 2530 break; 2531 case k_PostIndexRegister: 2532 OS << "post-idx register " << (PostIdxReg.isAdd ? 
"" : "-") 2533 << PostIdxReg.RegNum; 2534 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 2535 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 2536 << PostIdxReg.ShiftImm; 2537 OS << ">"; 2538 break; 2539 case k_ProcIFlags: { 2540 OS << "<ARM_PROC::"; 2541 unsigned IFlags = getProcIFlags(); 2542 for (int i=2; i >= 0; --i) 2543 if (IFlags & (1 << i)) 2544 OS << ARM_PROC::IFlagsToString(1 << i); 2545 OS << ">"; 2546 break; 2547 } 2548 case k_Register: 2549 OS << "<register " << getReg() << ">"; 2550 break; 2551 case k_ShifterImmediate: 2552 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 2553 << " #" << ShifterImm.Imm << ">"; 2554 break; 2555 case k_ShiftedRegister: 2556 OS << "<so_reg_reg " 2557 << RegShiftedReg.SrcReg << " " 2558 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) 2559 << " " << RegShiftedReg.ShiftReg << ">"; 2560 break; 2561 case k_ShiftedImmediate: 2562 OS << "<so_reg_imm " 2563 << RegShiftedImm.SrcReg << " " 2564 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) 2565 << " #" << RegShiftedImm.ShiftImm << ">"; 2566 break; 2567 case k_RotateImmediate: 2568 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 2569 break; 2570 case k_BitfieldDescriptor: 2571 OS << "<bitfield " << "lsb: " << Bitfield.LSB 2572 << ", width: " << Bitfield.Width << ">"; 2573 break; 2574 case k_RegisterList: 2575 case k_DPRRegisterList: 2576 case k_SPRRegisterList: { 2577 OS << "<register_list "; 2578 2579 const SmallVectorImpl<unsigned> &RegList = getRegList(); 2580 for (SmallVectorImpl<unsigned>::const_iterator 2581 I = RegList.begin(), E = RegList.end(); I != E; ) { 2582 OS << *I; 2583 if (++I < E) OS << ", "; 2584 } 2585 2586 OS << ">"; 2587 break; 2588 } 2589 case k_VectorList: 2590 OS << "<vector_list " << VectorList.Count << " * " 2591 << VectorList.RegNum << ">"; 2592 break; 2593 case k_VectorListAllLanes: 2594 OS << "<vector_list(all lanes) " << VectorList.Count << " * " 2595 << VectorList.RegNum << ">"; 2596 break; 2597 case k_VectorListIndexed: 2598 OS << 
"<vector_list(lane " << VectorList.LaneIndex << ") " 2599 << VectorList.Count << " * " << VectorList.RegNum << ">"; 2600 break; 2601 case k_Token: 2602 OS << "'" << getToken() << "'"; 2603 break; 2604 case k_VectorIndex: 2605 OS << "<vectorindex " << getVectorIndex() << ">"; 2606 break; 2607 } 2608 } 2609 2610 /// @name Auto-generated Match Functions 2611 /// { 2612 2613 static unsigned MatchRegisterName(StringRef Name); 2614 2615 /// } 2616 2617 bool ARMAsmParser::ParseRegister(unsigned &RegNo, 2618 SMLoc &StartLoc, SMLoc &EndLoc) { 2619 StartLoc = Parser.getTok().getLoc(); 2620 EndLoc = Parser.getTok().getEndLoc(); 2621 RegNo = tryParseRegister(); 2622 2623 return (RegNo == (unsigned)-1); 2624 } 2625 2626 /// Try to parse a register name. The token must be an Identifier when called, 2627 /// and if it is a register name the token is eaten and the register number is 2628 /// returned. Otherwise return -1. 2629 /// 2630 int ARMAsmParser::tryParseRegister() { 2631 const AsmToken &Tok = Parser.getTok(); 2632 if (Tok.isNot(AsmToken::Identifier)) return -1; 2633 2634 std::string lowerCase = Tok.getString().lower(); 2635 unsigned RegNum = MatchRegisterName(lowerCase); 2636 if (!RegNum) { 2637 RegNum = StringSwitch<unsigned>(lowerCase) 2638 .Case("r13", ARM::SP) 2639 .Case("r14", ARM::LR) 2640 .Case("r15", ARM::PC) 2641 .Case("ip", ARM::R12) 2642 // Additional register name aliases for 'gas' compatibility. 2643 .Case("a1", ARM::R0) 2644 .Case("a2", ARM::R1) 2645 .Case("a3", ARM::R2) 2646 .Case("a4", ARM::R3) 2647 .Case("v1", ARM::R4) 2648 .Case("v2", ARM::R5) 2649 .Case("v3", ARM::R6) 2650 .Case("v4", ARM::R7) 2651 .Case("v5", ARM::R8) 2652 .Case("v6", ARM::R9) 2653 .Case("v7", ARM::R10) 2654 .Case("v8", ARM::R11) 2655 .Case("sb", ARM::R9) 2656 .Case("sl", ARM::R10) 2657 .Case("fp", ARM::R11) 2658 .Default(0); 2659 } 2660 if (!RegNum) { 2661 // Check for aliases registered via .req. Canonicalize to lower case. 
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable (no tokens consumed).
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  // NOTE(review): Error() returns true, so the next statement returns 1
  // ("recoverable") even though the shift token was lexed and the previous
  // operand popped — confirm callers handle this path as intended.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted register vs. immediate-shifted register operand.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::
tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &RegTok = Parser.getTok();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
                                           RegTok.getEndLoc()));

  // A trailing '!' requests writeback; represent it as a token operand.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand.
This is only legal for vector registers, 2802 // but that'll get caught OK in operand matching, so we don't need to 2803 // explicitly filter everything else out here. 2804 if (Parser.getTok().is(AsmToken::LBrac)) { 2805 SMLoc SIdx = Parser.getTok().getLoc(); 2806 Parser.Lex(); // Eat left bracket token. 2807 2808 const MCExpr *ImmVal; 2809 if (getParser().parseExpression(ImmVal)) 2810 return true; 2811 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2812 if (!MCE) 2813 return TokError("immediate value expected for vector index"); 2814 2815 if (Parser.getTok().isNot(AsmToken::RBrac)) 2816 return Error(Parser.getTok().getLoc(), "']' expected"); 2817 2818 SMLoc E = Parser.getTok().getEndLoc(); 2819 Parser.Lex(); // Eat right bracket token. 2820 2821 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2822 SIdx, E, 2823 getContext())); 2824 } 2825 2826 return false; 2827 } 2828 2829 /// MatchCoprocessorOperandName - Try to parse an coprocessor related 2830 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2831 /// "c5", ... 2832 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2833 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2834 // but efficient. 
  // Dispatch on length: "p0".."p9"/"c0".."c9" (2 chars) or
  // "p10".."p15"/"c10".."c15" (3 chars). Anything else is not a match.
  switch (Name.size()) {
  default: return -1;
  case 2:
    if (Name[0] != CoprocOp)
      return -1;
    switch (Name[1]) {
    default:  return -1;
    case '0': return 0;
    case '1': return 1;
    case '2': return 2;
    case '3': return 3;
    case '4': return 4;
    case '5': return 5;
    case '6': return 6;
    case '7': return 7;
    case '8': return 8;
    case '9': return 9;
    }
  case 3:
    if (Name[0] != CoprocOp || Name[1] != '1')
      return -1;
    switch (Name[2]) {
    default:  return -1;
    case '0': return 10;
    case '1': return 11;
    case '2': return 12;
    case '3': return 13;
    case '4': return 14;
    case '5': return 15;
    }
  }
}

/// parseITCondCode - Try to parse a condition code for an IT instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  // "cs"/"hs" and "cc"/"lo" are alternate spellings of the same conditions.
  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
    .Case("eq", ARMCC::EQ)
    .Case("ne", ARMCC::NE)
    .Case("hs", ARMCC::HS)
    .Case("cs", ARMCC::HS)
    .Case("lo", ARMCC::LO)
    .Case("cc", ARMCC::LO)
    .Case("mi", ARMCC::MI)
    .Case("pl", ARMCC::PL)
    .Case("vs", ARMCC::VS)
    .Case("vc", ARMCC::VC)
    .Case("hi", ARMCC::HI)
    .Case("ls", ARMCC::LS)
    .Case("ge", ARMCC::GE)
    .Case("lt", ARMCC::LT)
    .Case("gt", ARMCC::GT)
    .Case("le", ARMCC::LE)
    .Case("al", ARMCC::AL)
    .Default(~0U);
  if (CC == ~0U)
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the token.

  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));

  return MatchOperand_Success;
}

/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Coprocessor numbers are spelled "p0".."p15".
  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  // Coprocessor registers are spelled "c0".."c15".
  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().parseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  // NOTE(review): unlike the other failure paths here, no diagnostic is
  // emitted before returning ParseFail on a missing '}' — confirm intended.
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}

// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // GPR order follows the architectural numbering r0..r12, sp, lr, pc,
  // wrapping pc back to r0.
  switch(Reg) {
  default: llvm_unreachable("Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

// Return the low-subreg of a given Q register.
static unsigned getDRegFromQReg(unsigned QReg) {
  switch (QReg) {
  default: llvm_unreachable("expected a Q register!");
  case ARM::Q0:  return ARM::D0;
  case ARM::Q1:  return ARM::D2;
  case ARM::Q2:  return ARM::D4;
  case ARM::Q3:  return ARM::D6;
  case ARM::Q4:  return ARM::D8;
  case ARM::Q5:  return ARM::D10;
  case ARM::Q6:  return ARM::D12;
  case ARM::Q7:  return ARM::D14;
  case ARM::Q8:  return ARM::D16;
  case ARM::Q9:  return ARM::D18;
  case ARM::Q10: return ARM::D20;
  case ARM::Q11: return ARM::D22;
  case ARM::Q12: return ARM::D24;
  case ARM::Q13: return ARM::D26;
  case ARM::Q14: return ARM::D28;
  case ARM::Q15: return ARM::D30;
  }
}

/// Parse a register list. Returns true on error (Error() has been emitted);
/// on success a register-list operand (and possibly a '^' token operand) is
/// appended to Operands.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many. Each entry pairs (encoding value, register).
  int EReg = 0;
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    ++Reg;
  }
  // The first register fixes the class for the rest of the list.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    // Out-of-order GPR lists only warn (gas accepts them); other classes
    // hard-error.
    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else
        return Error(RegLoc, "register list not in ascending order");
    }
    // Duplicates are accepted with a warning and not added again.
    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    if (isQReg) {
      // Push the second D sub-register of the Q reg as well.
      EReg = MRI->getEncodingValue(++Reg);
      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}

// Helper function to parse the lane index for vector lists.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      EndLoc = Parser.getTok().getEndLoc();
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assemble puts one in, and it's friendly to accept that.
    // NOTE(review): the comment below mentions '$', but only Hash is
    // checked here — confirm whether Dollar should be accepted too.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat '#' or '$'.

    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().parseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    EndLoc = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    // NOTE(review): the ']' has already been eaten here, so this error
    // location points at the token after the bracket — confirm intended.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: no lane specifier present.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    // Bare D register: one-element list.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    // Bare Q register: two-element list of its D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0; // 0 = unknown yet, 1 = single-spaced, 2 = double-spaced.
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax, e.g. {d0-d3}.
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      // Every register in the list must carry the same lane specifier.
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(Parser.getTok().getLoc(), "'}' expected");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }

    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  unsigned Opt;

  if (Tok.is(AsmToken::Identifier)) {
    StringRef OptStr = Tok.getString();

    // NOTE(review): slice(0, OptStr.size()) is a no-op; .lower() alone
    // would suffice.
    Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
      .Case("sy",    ARM_MB::SY)
      .Case("st",    ARM_MB::ST)
      .Case("ld",    ARM_MB::LD)
      .Case("sh",    ARM_MB::ISH)
      .Case("ish",   ARM_MB::ISH)
      .Case("shst",  ARM_MB::ISHST)
      .Case("ishst", ARM_MB::ISHST)
      .Case("ishld", ARM_MB::ISHLD)
      .Case("nsh",   ARM_MB::NSH)
      .Case("un",    ARM_MB::NSH)
      .Case("nshst", ARM_MB::NSHST)
      .Case("nshld", ARM_MB::NSHLD)
      .Case("unst",  ARM_MB::NSHST)
      .Case("osh",   ARM_MB::OSH)
      .Case("oshst", ARM_MB::OSHST)
      .Case("oshld", ARM_MB::OSHLD)
      .Default(~0U);

    // ishld, oshld, nshld and ld are only available from ARMv8.
    if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD ||
                        Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD))
      Opt = ~0U;

    if (Opt == ~0U)
      return MatchOperand_NoMatch;

    Parser.Lex(); // Eat identifier token.
  } else if (Tok.is(AsmToken::Hash) ||
             Tok.is(AsmToken::Dollar) ||
             Tok.is(AsmToken::Integer)) {
    // Numeric form: '#imm', '$imm' or a bare integer in [0, 15].
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    SMLoc Loc = Parser.getTok().getLoc();

    const MCExpr *MemBarrierID;
    if (getParser().parseExpression(MemBarrierID)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
    if (!CE) {
      Error(Loc, "constant expression expected");
      return MatchOperand_ParseFail;
    }

    int Val = CE->getValue();
    // The barrier option is a 4-bit field.
    if (Val & ~0xf) {
      Error(Loc, "immediate value out of range");
      return MatchOperand_ParseFail;
    }

    Opt = ARM_MB::RESERVED_0 + Val;
  } else
    return MatchOperand_ParseFail;

  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseInstSyncBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  unsigned Opt;

  if (Tok.is(AsmToken::Identifier)) {
    StringRef OptStr = Tok.getString();

    // "sy" is the only named ISB option.
    if (OptStr.lower() == "sy")
      Opt = ARM_ISB::SY;
    else
      return MatchOperand_NoMatch;

    Parser.Lex(); // Eat identifier token.
  } else if (Tok.is(AsmToken::Hash) ||
             Tok.is(AsmToken::Dollar) ||
             Tok.is(AsmToken::Integer)) {
    // Numeric form: '#imm', '$imm' or a bare integer in [0, 15].
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    SMLoc Loc = Parser.getTok().getLoc();

    const MCExpr *ISBarrierID;
    if (getParser().parseExpression(ISBarrierID)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID);
    if (!CE) {
      Error(Loc, "constant expression expected");
      return MatchOperand_ParseFail;
    }

    int Val = CE->getValue();
    // The barrier option is a 4-bit field.
    if (Val & ~0xf) {
      Error(Loc, "immediate value out of range");
      return MatchOperand_ParseFail;
    }

    Opt = ARM_ISB::RESERVED_0 + Val;
  } else
    return MatchOperand_ParseFail;

  Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt(
          (ARM_ISB::InstSyncBOpt)Opt, S));
  return MatchOperand_Success;
}


/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef IFlagsStr = Tok.getString();

  // An iflags string of "none" is interpreted to mean that none of the AIF
  // bits are set. Not a terribly useful instruction, but a valid encoding.
  unsigned IFlags = 0;
  if (IFlagsStr != "none") {
    // Accumulate one bit per letter; each of 'a', 'i', 'f' may appear once.
    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
        .Case("a", ARM_PROC::A)
        .Case("i", ARM_PROC::I)
        .Case("f", ARM_PROC::F)
        .Default(~0U);

      // If some specific iflag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (IFlags & Flag))
        return MatchOperand_NoMatch;

      IFlags |= Flag;
    }
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
  return MatchOperand_Success;
}

/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  StringRef Mask = Tok.getString();

  if (isMClass()) {
    // See ARMv6-M 10.1.1
    std::string Name = Mask.lower();
    unsigned FlagsVal = StringSwitch<unsigned>(Name)
      // Note: in the documentation:
      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
      //  for MSR APSR_nzcvq.
      // but we do make it an alias here.  This is so to get the "mask encoding"
      // bits correct on MSR APSR writes.
      //
      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
      // should really only be allowed when writing a special register.  Note
      // they get dropped in the MRS instruction reading a special register as
      // the SYSm field is only 8 bits.
      //
      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
      // includes the DSP extension but that is not checked.
      .Case("apsr", 0x800)
      .Case("apsr_nzcvq", 0x800)
      .Case("apsr_g", 0x400)
      .Case("apsr_nzcvqg", 0xc00)
      .Case("iapsr", 0x801)
      .Case("iapsr_nzcvq", 0x801)
      .Case("iapsr_g", 0x401)
      .Case("iapsr_nzcvqg", 0xc01)
      .Case("eapsr", 0x802)
      .Case("eapsr_nzcvq", 0x802)
      .Case("eapsr_g", 0x402)
      .Case("eapsr_nzcvqg", 0xc02)
      .Case("xpsr", 0x803)
      .Case("xpsr_nzcvq", 0x803)
      .Case("xpsr_g", 0x403)
      .Case("xpsr_nzcvqg", 0xc03)
      .Case("ipsr", 0x805)
      .Case("epsr", 0x806)
      .Case("iepsr", 0x807)
      .Case("msp", 0x808)
      .Case("psp", 0x809)
      .Case("primask", 0x810)
      .Case("basepri", 0x811)
      .Case("basepri_max", 0x812)
      .Case("faultmask", 0x813)
      .Case("control", 0x814)
      .Default(~0U);

    if (FlagsVal == ~0U)
      return MatchOperand_NoMatch;

    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
      // basepri, basepri_max and faultmask only valid for V7m.
      return MatchOperand_NoMatch;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g",      0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
    if (Flags == "all" || Flags == "")
      Flags = "fc";
    // Accumulate one mask bit per letter; duplicates are rejected.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (FlagsVal == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
3725 // 3726 // if (!FlagsVal) 3727 // FlagsVal = 0x9; 3728 3729 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3730 if (SpecReg == "spsr") 3731 FlagsVal |= 16; 3732 3733 Parser.Lex(); // Eat identifier token. 3734 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3735 return MatchOperand_Success; 3736 } 3737 3738 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3739 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3740 int Low, int High) { 3741 const AsmToken &Tok = Parser.getTok(); 3742 if (Tok.isNot(AsmToken::Identifier)) { 3743 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3744 return MatchOperand_ParseFail; 3745 } 3746 StringRef ShiftName = Tok.getString(); 3747 std::string LowerOp = Op.lower(); 3748 std::string UpperOp = Op.upper(); 3749 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3750 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3751 return MatchOperand_ParseFail; 3752 } 3753 Parser.Lex(); // Eat shift type token. 3754 3755 // There must be a '#' and a shift amount. 3756 if (Parser.getTok().isNot(AsmToken::Hash) && 3757 Parser.getTok().isNot(AsmToken::Dollar)) { 3758 Error(Parser.getTok().getLoc(), "'#' expected"); 3759 return MatchOperand_ParseFail; 3760 } 3761 Parser.Lex(); // Eat hash token. 
3762 3763 const MCExpr *ShiftAmount; 3764 SMLoc Loc = Parser.getTok().getLoc(); 3765 SMLoc EndLoc; 3766 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 3767 Error(Loc, "illegal expression"); 3768 return MatchOperand_ParseFail; 3769 } 3770 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3771 if (!CE) { 3772 Error(Loc, "constant expression expected"); 3773 return MatchOperand_ParseFail; 3774 } 3775 int Val = CE->getValue(); 3776 if (Val < Low || Val > High) { 3777 Error(Loc, "immediate value out of range"); 3778 return MatchOperand_ParseFail; 3779 } 3780 3781 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc)); 3782 3783 return MatchOperand_Success; 3784 } 3785 3786 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3787 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3788 const AsmToken &Tok = Parser.getTok(); 3789 SMLoc S = Tok.getLoc(); 3790 if (Tok.isNot(AsmToken::Identifier)) { 3791 Error(S, "'be' or 'le' operand expected"); 3792 return MatchOperand_ParseFail; 3793 } 3794 int Val = StringSwitch<int>(Tok.getString().lower()) 3795 .Case("be", 1) 3796 .Case("le", 0) 3797 .Default(-1); 3798 Parser.Lex(); // Eat the token. 3799 3800 if (Val == -1) { 3801 Error(S, "'be' or 'le' operand expected"); 3802 return MatchOperand_ParseFail; 3803 } 3804 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3805 getContext()), 3806 S, Tok.getEndLoc())); 3807 return MatchOperand_Success; 3808 } 3809 3810 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3811 /// instructions. Legal values are: 3812 /// lsl #n 'n' in [0,31] 3813 /// asr #n 'n' in [1,32] 3814 /// n == 32 encoded as n == 0. 
3815 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3816 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3817 const AsmToken &Tok = Parser.getTok(); 3818 SMLoc S = Tok.getLoc(); 3819 if (Tok.isNot(AsmToken::Identifier)) { 3820 Error(S, "shift operator 'asr' or 'lsl' expected"); 3821 return MatchOperand_ParseFail; 3822 } 3823 StringRef ShiftName = Tok.getString(); 3824 bool isASR; 3825 if (ShiftName == "lsl" || ShiftName == "LSL") 3826 isASR = false; 3827 else if (ShiftName == "asr" || ShiftName == "ASR") 3828 isASR = true; 3829 else { 3830 Error(S, "shift operator 'asr' or 'lsl' expected"); 3831 return MatchOperand_ParseFail; 3832 } 3833 Parser.Lex(); // Eat the operator. 3834 3835 // A '#' and a shift amount. 3836 if (Parser.getTok().isNot(AsmToken::Hash) && 3837 Parser.getTok().isNot(AsmToken::Dollar)) { 3838 Error(Parser.getTok().getLoc(), "'#' expected"); 3839 return MatchOperand_ParseFail; 3840 } 3841 Parser.Lex(); // Eat hash token. 3842 SMLoc ExLoc = Parser.getTok().getLoc(); 3843 3844 const MCExpr *ShiftAmount; 3845 SMLoc EndLoc; 3846 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 3847 Error(ExLoc, "malformed shift expression"); 3848 return MatchOperand_ParseFail; 3849 } 3850 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3851 if (!CE) { 3852 Error(ExLoc, "shift amount must be an immediate"); 3853 return MatchOperand_ParseFail; 3854 } 3855 3856 int64_t Val = CE->getValue(); 3857 if (isASR) { 3858 // Shift amount must be in [1,32] 3859 if (Val < 1 || Val > 32) { 3860 Error(ExLoc, "'asr' shift amount must be in range [1,32]"); 3861 return MatchOperand_ParseFail; 3862 } 3863 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3864 if (isThumb() && Val == 32) { 3865 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode"); 3866 return MatchOperand_ParseFail; 3867 } 3868 if (Val == 32) Val = 0; 3869 } else { 3870 // Shift amount must be in [1,32] 3871 if (Val < 0 || Val > 31) { 3872 Error(ExLoc, "'lsr' shift amount must be in range [0,31]"); 3873 return MatchOperand_ParseFail; 3874 } 3875 } 3876 3877 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc)); 3878 3879 return MatchOperand_Success; 3880 } 3881 3882 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3883 /// of instructions. Legal values are: 3884 /// ror #n 'n' in {0, 8, 16, 24} 3885 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3886 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3887 const AsmToken &Tok = Parser.getTok(); 3888 SMLoc S = Tok.getLoc(); 3889 if (Tok.isNot(AsmToken::Identifier)) 3890 return MatchOperand_NoMatch; 3891 StringRef ShiftName = Tok.getString(); 3892 if (ShiftName != "ror" && ShiftName != "ROR") 3893 return MatchOperand_NoMatch; 3894 Parser.Lex(); // Eat the operator. 3895 3896 // A '#' and a rotate amount. 3897 if (Parser.getTok().isNot(AsmToken::Hash) && 3898 Parser.getTok().isNot(AsmToken::Dollar)) { 3899 Error(Parser.getTok().getLoc(), "'#' expected"); 3900 return MatchOperand_ParseFail; 3901 } 3902 Parser.Lex(); // Eat hash token. 
  SMLoc ExLoc = Parser.getTok().getLoc();

  const MCExpr *ShiftAmount;
  SMLoc EndLoc;
  if (getParser().parseExpression(ShiftAmount, EndLoc)) {
    Error(ExLoc, "malformed rotate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(ExLoc, "rotate amount must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Val = CE->getValue();
  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
  // normally, zero is represented in asm by omitting the rotate operand
  // entirely.
  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));

  return MatchOperand_Success;
}

/// parseBitfield - Parse a bitfield descriptor "#<lsb>, #<width>" as used by
/// the BFI/BFC/SBFX/UBFX family. The two constants become a single bitfield
/// operand; lsb must be in [0,31] and width in [1,32-lsb].
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().parseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  SMLoc EndLoc;
  if (getParser().parseExpression(WidthExpr, EndLoc)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));

  return MatchOperand_Success;
}

ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // postidx_reg := '+' register {, shift}
  //              | '-' register {, shift}
  //              | register {, shift}

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  SMLoc E = Parser.getTok().getEndLoc();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    // Only a hard failure if we already consumed a sign token.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
      return MatchOperand_ParseFail;

    // FIXME: Only approximates end...may include intervening whitespace.
    E = Parser.getTok().getLoc();
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
                                                  ShiftImm, S, E));

  return MatchOperand_Success;
}

ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //  am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat '#' or '$'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    SMLoc E;
    if (getParser().parseExpression(Offset, E))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // Negative zero is encoded as the flag value INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }

  bool haveEaten = false;
  bool isAdd = true;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }

  Tok = Parser.getTok();
  int Reg = tryParseRegister();
  if (Reg == -1) {
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Tok.getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, Tok.getEndLoc()));

  return MatchOperand_Success;
}

/// Convert parsed operands to MCInst.  Needed here because this instruction
/// only has two register operands, but multiplication is commutative so
/// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN".
void ARMAsmParser::
cvtThumbMultiply(MCInst &Inst,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rd is both destination and one of the sources; re-add operand 0 as Rn/Rm.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
}

/// Convert a parsed Thumb branch into the right opcode: pick the
/// conditional/unconditional form based on IT-block context, then pick the
/// 16-bit or 32-bit encoding based on the immediate's range.
void ARMAsmParser::
cvtThumbBranches(MCInst &Inst,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc: CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions need to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand*>(Operands[CondOp])->getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ?
                       ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
      // tB only has an 11-bit (halfword-scaled) offset; widen if needed.
      if(!op->isSignedOffset<11, 1>() && isThumbTwo())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand* op = static_cast<ARMOperand*>(Operands[ImmOp]);
      // tBcc only has an 8-bit (halfword-scaled) offset; widen if needed.
      if(!op->isSignedOffset<8, 1>() && isThumbTwo())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  ((ARMOperand*)Operands[ImmOp])->addImmOperands(Inst, 1);
  ((ARMOperand*)Operands[CondOp])->addCondCodeOperands(Inst, 2);
}

/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  if (Tok.is(AsmToken::RBrac)) {
    // Bare "[Rn]" form: no offset at all.
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The alignment is specified in bits in the assembly; convert to the
    // byte count stored in the operand.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().parseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// return true if it parses a shift otherwise it returns false.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    // If <ShiftTy> #0, turn it into a no_shift.
    if (Imm == 0)
      St = ARM_AM::lsl;
    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
    if (Imm == 32)
      Imm = 0;
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real)) {
    // Literal real: store the single-precision bit pattern as an integer
    // immediate.
    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the 8-bit encoding into the actual FP value, then store the
    // double-precision bit pattern as an integer immediate.
    double RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::Create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse an ARM instruction operand.  For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach. A custom
  // parser is selected from the tblgen'erated match classes for this operand
  // position of this mnemonic.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      // Registers (with optional writeback '!') and shifted registers are
      // handled by dedicated parsers; only fall through to the expression
      // path if neither form matches.
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall through for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    S = Parser.getTok().getLoc();
    Parser.Lex();

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      bool isNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        // "#-0" parses as the constant 0 with isNegative set; encode it as
        // INT32_MIN so later code can distinguish negative zero (used by
        // some addressing-mode forms) from plain zero.
        if (isNegative && Val == 0)
          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    // FALLTHROUGH
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
// :lower16: and :upper16:. On success, RefKind holds the corresponding
// relocation variant; returns true (with a diagnostic) on malformed input.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vaclt" || Mnemonic == "vacle" || Mnemonic == "hlt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
      Mnemonic == "vcvta" || Mnemonic == "vcvtn" || Mnemonic == "vcvtp" ||
      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic.startswith("vsel"))
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // The condition code, if present, is always the last two characters.
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have an interrupt mode operand which is glued
  // into the mnemonic. Check if this is the case, split it and parse the imod
  // op.
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, StringRef FullInst,
                      bool &CanAcceptCarrySet, bool &CanAcceptPredicationCode) {
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic.startswith("crc32") ||
      Mnemonic.startswith("cps") || Mnemonic.startswith("vsel") ||
      Mnemonic == "vmaxnm" || Mnemonic == "vminnm" || Mnemonic == "vcvta" ||
      Mnemonic == "vcvtn" || Mnemonic == "vcvtp" || Mnemonic == "vcvtm" ||
      Mnemonic == "vrinta" || Mnemonic == "vrintn" || Mnemonic == "vrintp" ||
      Mnemonic == "vrintm" || Mnemonic.startswith("aes") ||
      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
      (FullInst.startswith("vmull") && FullInst.endswith(".p64"))) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode
      = Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dsb" && Mnemonic != "isb" &&
        Mnemonic != "pld" && Mnemonic != "pli" && Mnemonic != "pldw" &&
        Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
  } else if (isThumbOne()) {
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}

// shouldOmitCCOutOperand - Decide, from the already-parsed operand list,
// whether the defaulted cc_out operand (Operands[1]) should be dropped so the
// matcher can select an encoding without one.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      ((Mnemonic == "add" &&static_cast<ARMOperand*>(Operands[5])->isReg()) ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;
    // Check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if (static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
       (Operands.size() == 6 &&
        static_cast<ARMOperand*>(Operands[5])->isImm())))
    return true;

  return false;
}

// shouldOmitPredicateOperand - Decide whether the defaulted predicate operand
// (Operands[1]) should be dropped; needed because some mnemonics have both a
// predicable VFP form and a non-predicable NEON form.
bool ARMAsmParser::shouldOmitPredicateOperand(
    StringRef Mnemonic, SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
  // VRINT{Z, R, X} have a predicate operand in VFP, but not in NEON
  unsigned RegIdx = 3;
  if ((Mnemonic == "vrintz" || Mnemonic == "vrintx" || Mnemonic == "vrintr") &&
      static_cast<ARMOperand *>(Operands[2])->getToken() == ".f32") {
    // Skip over a second datatype token if present (e.g. "vrintz.f32.f32").
    if (static_cast<ARMOperand *>(Operands[3])->isToken() &&
        static_cast<ARMOperand *>(Operands[3])->getToken() == ".f32")
      RegIdx = 4;

    // A D- or Q-register destination means this is the NEON form.
    if (static_cast<ARMOperand *>(Operands[RegIdx])->isReg() &&
        (ARMMCRegisterClasses[ARM::DPRRegClassID]
             .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg()) ||
         ARMMCRegisterClasses[ARM::QPRRegClassID]
             .contains(static_cast<ARMOperand *>(Operands[RegIdx])->getReg())))
      return true;
  }
  return false;
}

// isDataTypeToken - Return true if Tok is one of the NEON/VFP datatype
// suffix tokens (".8", ".i32", ".f64", etc.).
static bool isDataTypeToken(StringRef Tok) {
  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
         Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
         Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
         Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
         Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
         Tok == ".f" || Tok == ".d";
}

// FIXME: This bit
should probably be handled via an explicit match class 4987 // in the .td files that matches the suffix instead of having it be 4988 // a literal string token the way it is now. 4989 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) { 4990 return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm"); 4991 } 4992 static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features, 4993 unsigned VariantID); 4994 /// Parse an arm instruction mnemonic followed by its operands. 4995 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, 4996 SMLoc NameLoc, 4997 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4998 // Apply mnemonic aliases before doing anything else, as the destination 4999 // mnemnonic may include suffices and we want to handle them normally. 5000 // The generic tblgen'erated code does this later, at the start of 5001 // MatchInstructionImpl(), but that's too late for aliases that include 5002 // any sort of suffix. 5003 unsigned AvailableFeatures = getAvailableFeatures(); 5004 unsigned AssemblerDialect = getParser().getAssemblerDialect(); 5005 applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect); 5006 5007 // First check for the ARM-specific .req directive. 5008 if (Parser.getTok().is(AsmToken::Identifier) && 5009 Parser.getTok().getIdentifier() == ".req") { 5010 parseDirectiveReq(Name, NameLoc); 5011 // We always return 'error' for this, as we're done with this 5012 // statement and don't need to match the 'instruction." 5013 return true; 5014 } 5015 5016 // Create the leading tokens for the mnemonic, split by '.' characters. 5017 size_t Start = 0, Next = Name.find('.'); 5018 StringRef Mnemonic = Name.slice(Start, Next); 5019 5020 // Split out the predication code and carry setting flag from the mnemonic. 
5021 unsigned PredicationCode; 5022 unsigned ProcessorIMod; 5023 bool CarrySetting; 5024 StringRef ITMask; 5025 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 5026 ProcessorIMod, ITMask); 5027 5028 // In Thumb1, only the branch (B) instruction can be predicated. 5029 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 5030 Parser.eatToEndOfStatement(); 5031 return Error(NameLoc, "conditional execution not supported in Thumb1"); 5032 } 5033 5034 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 5035 5036 // Handle the IT instruction ITMask. Convert it to a bitmask. This 5037 // is the mask as it will be for the IT encoding if the conditional 5038 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 5039 // where the conditional bit0 is zero, the instruction post-processing 5040 // will adjust the mask accordingly. 5041 if (Mnemonic == "it") { 5042 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 5043 if (ITMask.size() > 3) { 5044 Parser.eatToEndOfStatement(); 5045 return Error(Loc, "too many conditions on IT instruction"); 5046 } 5047 unsigned Mask = 8; 5048 for (unsigned i = ITMask.size(); i != 0; --i) { 5049 char pos = ITMask[i - 1]; 5050 if (pos != 't' && pos != 'e') { 5051 Parser.eatToEndOfStatement(); 5052 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 5053 } 5054 Mask >>= 1; 5055 if (ITMask[i - 1] == 't') 5056 Mask |= 8; 5057 } 5058 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 5059 } 5060 5061 // FIXME: This is all a pretty gross hack. We should automatically handle 5062 // optional operands like this via tblgen. 5063 5064 // Next, add the CCOut and ConditionCode operands, if needed. 
5065 // 5066 // For mnemonics which can ever incorporate a carry setting bit or predication 5067 // code, our matching model involves us always generating CCOut and 5068 // ConditionCode operands to match the mnemonic "as written" and then we let 5069 // the matcher deal with finding the right instruction or generating an 5070 // appropriate error. 5071 bool CanAcceptCarrySet, CanAcceptPredicationCode; 5072 getMnemonicAcceptInfo(Mnemonic, Name, CanAcceptCarrySet, CanAcceptPredicationCode); 5073 5074 // If we had a carry-set on an instruction that can't do that, issue an 5075 // error. 5076 if (!CanAcceptCarrySet && CarrySetting) { 5077 Parser.eatToEndOfStatement(); 5078 return Error(NameLoc, "instruction '" + Mnemonic + 5079 "' can not set flags, but 's' suffix specified"); 5080 } 5081 // If we had a predication code on an instruction that can't do that, issue an 5082 // error. 5083 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 5084 Parser.eatToEndOfStatement(); 5085 return Error(NameLoc, "instruction '" + Mnemonic + 5086 "' is not predicable, but condition code specified"); 5087 } 5088 5089 // Add the carry setting operand, if necessary. 5090 if (CanAcceptCarrySet) { 5091 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 5092 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 5093 Loc)); 5094 } 5095 5096 // Add the predication code operand, if necessary. 5097 if (CanAcceptPredicationCode) { 5098 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 5099 CarrySetting); 5100 Operands.push_back(ARMOperand::CreateCondCode( 5101 ARMCC::CondCodes(PredicationCode), Loc)); 5102 } 5103 5104 // Add the processor imod operand, if necessary. 5105 if (ProcessorIMod) { 5106 Operands.push_back(ARMOperand::CreateImm( 5107 MCConstantExpr::Create(ProcessorIMod, getContext()), 5108 NameLoc, NameLoc)); 5109 } 5110 5111 // Add the remaining tokens in the mnemonic. 
5112 while (Next != StringRef::npos) { 5113 Start = Next; 5114 Next = Name.find('.', Start + 1); 5115 StringRef ExtraToken = Name.slice(Start, Next); 5116 5117 // Some NEON instructions have an optional datatype suffix that is 5118 // completely ignored. Check for that. 5119 if (isDataTypeToken(ExtraToken) && 5120 doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken)) 5121 continue; 5122 5123 // For for ARM mode generate an error if the .n qualifier is used. 5124 if (ExtraToken == ".n" && !isThumb()) { 5125 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 5126 return Error(Loc, "instruction with .n (narrow) qualifier not allowed in " 5127 "arm mode"); 5128 } 5129 5130 // The .n qualifier is always discarded as that is what the tables 5131 // and matcher expect. In ARM mode the .w qualifier has no effect, 5132 // so discard it to avoid errors that can be caused by the matcher. 5133 if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) { 5134 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 5135 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 5136 } 5137 } 5138 5139 // Read the remaining operands. 5140 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5141 // Read the first operand. 5142 if (parseOperand(Operands, Mnemonic)) { 5143 Parser.eatToEndOfStatement(); 5144 return true; 5145 } 5146 5147 while (getLexer().is(AsmToken::Comma)) { 5148 Parser.Lex(); // Eat the comma. 5149 5150 // Parse and remember the operand. 5151 if (parseOperand(Operands, Mnemonic)) { 5152 Parser.eatToEndOfStatement(); 5153 return true; 5154 } 5155 } 5156 } 5157 5158 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5159 SMLoc Loc = getLexer().getLoc(); 5160 Parser.eatToEndOfStatement(); 5161 return Error(Loc, "unexpected token in argument list"); 5162 } 5163 5164 Parser.Lex(); // Consume the EndOfStatement 5165 5166 // Some instructions, mostly Thumb, have forms for the same mnemonic that 5167 // do and don't have a cc_out optional-def operand. 
With some spot-checks 5168 // of the operand list, we can figure out which variant we're trying to 5169 // parse and adjust accordingly before actually matching. We shouldn't ever 5170 // try to remove a cc_out operand that was explicitly set on the the 5171 // mnemonic, of course (CarrySetting == true). Reason number #317 the 5172 // table driven matcher doesn't fit well with the ARM instruction set. 5173 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 5174 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 5175 Operands.erase(Operands.begin() + 1); 5176 delete Op; 5177 } 5178 5179 // Some instructions have the same mnemonic, but don't always 5180 // have a predicate. Distinguish them here and delete the 5181 // predicate if needed. 5182 if (shouldOmitPredicateOperand(Mnemonic, Operands)) { 5183 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 5184 Operands.erase(Operands.begin() + 1); 5185 delete Op; 5186 } 5187 5188 // ARM mode 'blx' need special handling, as the register operand version 5189 // is predicable, but the label operand version is not. So, we can't rely 5190 // on the Mnemonic based checking to correctly figure out when to put 5191 // a k_CondCode operand in the list. If we're trying to match the label 5192 // version, remove the k_CondCode operand here. 5193 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 5194 static_cast<ARMOperand*>(Operands[2])->isImm()) { 5195 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 5196 Operands.erase(Operands.begin() + 1); 5197 delete Op; 5198 } 5199 5200 // Adjust operands of ldrexd/strexd to MCK_GPRPair. 5201 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint, 5202 // a single GPRPair reg operand is used in the .td file to replace the two 5203 // GPRs. However, when parsing from asm, the two GRPs cannot be automatically 5204 // expressed as a GPRPair, so we have to manually merge them. 
5205 // FIXME: We would really like to be able to tablegen'erate this. 5206 if (!isThumb() && Operands.size() > 4 && 5207 (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" || 5208 Mnemonic == "stlexd")) { 5209 bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd"); 5210 unsigned Idx = isLoad ? 2 : 3; 5211 ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]); 5212 ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]); 5213 5214 const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID); 5215 // Adjust only if Op1 and Op2 are GPRs. 5216 if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) && 5217 MRC.contains(Op2->getReg())) { 5218 unsigned Reg1 = Op1->getReg(); 5219 unsigned Reg2 = Op2->getReg(); 5220 unsigned Rt = MRI->getEncodingValue(Reg1); 5221 unsigned Rt2 = MRI->getEncodingValue(Reg2); 5222 5223 // Rt2 must be Rt + 1 and Rt must be even. 5224 if (Rt + 1 != Rt2 || (Rt & 1)) { 5225 Error(Op2->getStartLoc(), isLoad ? 5226 "destination operands must be sequential" : 5227 "source operands must be sequential"); 5228 return true; 5229 } 5230 unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0, 5231 &(MRI->getRegClass(ARM::GPRPairRegClassID))); 5232 Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2); 5233 Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg( 5234 NewReg, Op1->getStartLoc(), Op2->getEndLoc())); 5235 delete Op1; 5236 delete Op2; 5237 } 5238 } 5239 5240 // FIXME: As said above, this is all a pretty gross hack. This instruction 5241 // does not fit with other "subs" and tblgen. 5242 // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction 5243 // so the Mnemonic is the original name "subs" and delete the predicate 5244 // operand so it will match the table entry. 
5245 if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 && 5246 static_cast<ARMOperand*>(Operands[3])->isReg() && 5247 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::PC && 5248 static_cast<ARMOperand*>(Operands[4])->isReg() && 5249 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::LR && 5250 static_cast<ARMOperand*>(Operands[5])->isImm()) { 5251 ARMOperand *Op0 = static_cast<ARMOperand*>(Operands[0]); 5252 Operands.erase(Operands.begin()); 5253 delete Op0; 5254 Operands.insert(Operands.begin(), ARMOperand::CreateToken(Name, NameLoc)); 5255 5256 ARMOperand *Op1 = static_cast<ARMOperand*>(Operands[1]); 5257 Operands.erase(Operands.begin() + 1); 5258 delete Op1; 5259 } 5260 return false; 5261 } 5262 5263 // Validate context-sensitive operand constraints. 5264 5265 // return 'true' if register list contains non-low GPR registers, 5266 // 'false' otherwise. If Reg is in the register list or is HiReg, set 5267 // 'containsReg' to true. 5268 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 5269 unsigned HiReg, bool &containsReg) { 5270 containsReg = false; 5271 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 5272 unsigned OpReg = Inst.getOperand(i).getReg(); 5273 if (OpReg == Reg) 5274 containsReg = true; 5275 // Anything other than a low register isn't legal here. 5276 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 5277 return true; 5278 } 5279 return false; 5280 } 5281 5282 // Check if the specified regisgter is in the register list of the inst, 5283 // starting at the indicated operand number. 5284 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 5285 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 5286 unsigned OpReg = Inst.getOperand(i).getReg(); 5287 if (OpReg == Reg) 5288 return true; 5289 } 5290 return false; 5291 } 5292 5293 // Return true if instruction has the interesting property of being 5294 // allowed in IT blocks, but not being predicable. 
static bool instIsBreakpoint(const MCInst &Inst) {
  // BKPT/HLT (ARM and Thumb encodings) execute unconditionally even inside
  // an IT block, so they are exempt from the usual predication checks.
  return Inst.getOpcode() == ARM::tBKPT ||
         Inst.getOpcode() == ARM::BKPT ||
         Inst.getOpcode() == ARM::tHLT ||
         Inst.getOpcode() == ARM::HLT;

}

// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();

  // Check the IT block state first.
  // NOTE: BKPT and HLT instructions have the interesting property of being
  // allowed in IT blocks, but not being predicable. They just always execute.
  if (inITBlock() && !instIsBreakpoint(Inst)) {
    unsigned Bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      // Bit is 1 if this slot uses the IT condition, 0 if it uses the
      // inverse ('e' in the mask).
      Bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = Bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned I = 1; I < Operands.size(); ++I)
        if (static_cast<ARMOperand*>(Operands[I])->isCondCode())
          CondLoc = Operands[I]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
5337 } else if (isThumbTwo() && MCID.isPredicable() && 5338 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 5339 ARMCC::AL && Inst.getOpcode() != ARM::tBcc && 5340 Inst.getOpcode() != ARM::t2Bcc) 5341 return Error(Loc, "predicated instructions must be in IT block"); 5342 5343 const unsigned Opcode = Inst.getOpcode(); 5344 switch (Opcode) { 5345 case ARM::LDRD: 5346 case ARM::LDRD_PRE: 5347 case ARM::LDRD_POST: { 5348 const unsigned RtReg = Inst.getOperand(0).getReg(); 5349 5350 // Rt can't be R14. 5351 if (RtReg == ARM::LR) 5352 return Error(Operands[3]->getStartLoc(), 5353 "Rt can't be R14"); 5354 5355 const unsigned Rt = MRI->getEncodingValue(RtReg); 5356 // Rt must be even-numbered. 5357 if ((Rt & 1) == 1) 5358 return Error(Operands[3]->getStartLoc(), 5359 "Rt must be even-numbered"); 5360 5361 // Rt2 must be Rt + 1. 5362 const unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 5363 if (Rt2 != Rt + 1) 5364 return Error(Operands[3]->getStartLoc(), 5365 "destination operands must be sequential"); 5366 5367 if (Opcode == ARM::LDRD_PRE || Opcode == ARM::LDRD_POST) { 5368 const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg()); 5369 // For addressing modes with writeback, the base register needs to be 5370 // different from the destination registers. 5371 if (Rn == Rt || Rn == Rt2) 5372 return Error(Operands[3]->getStartLoc(), 5373 "base register needs to be different from destination " 5374 "registers"); 5375 } 5376 5377 return false; 5378 } 5379 case ARM::t2LDRDi8: 5380 case ARM::t2LDRD_PRE: 5381 case ARM::t2LDRD_POST: { 5382 // Rt2 must be different from Rt. 5383 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); 5384 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 5385 if (Rt2 == Rt) 5386 return Error(Operands[3]->getStartLoc(), 5387 "destination operands can't be identical"); 5388 return false; 5389 } 5390 case ARM::STRD: { 5391 // Rt2 must be Rt + 1. 
5392 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg()); 5393 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 5394 if (Rt2 != Rt + 1) 5395 return Error(Operands[3]->getStartLoc(), 5396 "source operands must be sequential"); 5397 return false; 5398 } 5399 case ARM::STRD_PRE: 5400 case ARM::STRD_POST: { 5401 // Rt2 must be Rt + 1. 5402 unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg()); 5403 unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg()); 5404 if (Rt2 != Rt + 1) 5405 return Error(Operands[3]->getStartLoc(), 5406 "source operands must be sequential"); 5407 return false; 5408 } 5409 case ARM::SBFX: 5410 case ARM::UBFX: { 5411 // Width must be in range [1, 32-lsb]. 5412 unsigned LSB = Inst.getOperand(2).getImm(); 5413 unsigned Widthm1 = Inst.getOperand(3).getImm(); 5414 if (Widthm1 >= 32 - LSB) 5415 return Error(Operands[5]->getStartLoc(), 5416 "bitfield width must be in range [1,32-lsb]"); 5417 return false; 5418 } 5419 case ARM::tLDMIA: { 5420 // If we're parsing Thumb2, the .w variant is available and handles 5421 // most cases that are normally illegal for a Thumb1 LDM instruction. 5422 // We'll make the transformation in processInstruction() if necessary. 5423 // 5424 // Thumb LDM instructions are writeback iff the base register is not 5425 // in the register list. 5426 unsigned Rn = Inst.getOperand(0).getReg(); 5427 bool HasWritebackToken = 5428 (static_cast<ARMOperand*>(Operands[3])->isToken() && 5429 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 5430 bool ListContainsBase; 5431 if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo()) 5432 return Error(Operands[3 + HasWritebackToken]->getStartLoc(), 5433 "registers must be in range r0-r7"); 5434 // If we should have writeback, then there should be a '!' token. 5435 if (!ListContainsBase && !HasWritebackToken && !isThumbTwo()) 5436 return Error(Operands[2]->getStartLoc(), 5437 "writeback operator '!' 
expected"); 5438 // If we should not have writeback, there must not be a '!'. This is 5439 // true even for the 32-bit wide encodings. 5440 if (ListContainsBase && HasWritebackToken) 5441 return Error(Operands[3]->getStartLoc(), 5442 "writeback operator '!' not allowed when base register " 5443 "in register list"); 5444 5445 break; 5446 } 5447 case ARM::t2LDMIA_UPD: { 5448 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 5449 return Error(Operands[4]->getStartLoc(), 5450 "writeback operator '!' not allowed when base register " 5451 "in register list"); 5452 break; 5453 } 5454 case ARM::tMUL: { 5455 // The second source operand must be the same register as the destination 5456 // operand. 5457 // 5458 // In this case, we must directly check the parsed operands because the 5459 // cvtThumbMultiply() function is written in such a way that it guarantees 5460 // this first statement is always true for the new Inst. Essentially, the 5461 // destination is unconditionally copied into the second source operand 5462 // without checking to see if it matches what we actually parsed. 5463 if (Operands.size() == 6 && 5464 (((ARMOperand*)Operands[3])->getReg() != 5465 ((ARMOperand*)Operands[5])->getReg()) && 5466 (((ARMOperand*)Operands[3])->getReg() != 5467 ((ARMOperand*)Operands[4])->getReg())) { 5468 return Error(Operands[3]->getStartLoc(), 5469 "destination register must match source register"); 5470 } 5471 break; 5472 } 5473 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 5474 // so only issue a diagnostic for thumb1. The instructions will be 5475 // switched to the t2 encodings in processInstruction() if necessary. 
5476 case ARM::tPOP: { 5477 bool ListContainsBase; 5478 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) && 5479 !isThumbTwo()) 5480 return Error(Operands[2]->getStartLoc(), 5481 "registers must be in range r0-r7 or pc"); 5482 break; 5483 } 5484 case ARM::tPUSH: { 5485 bool ListContainsBase; 5486 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) && 5487 !isThumbTwo()) 5488 return Error(Operands[2]->getStartLoc(), 5489 "registers must be in range r0-r7 or lr"); 5490 break; 5491 } 5492 case ARM::tSTMIA_UPD: { 5493 bool ListContainsBase; 5494 if (checkLowRegisterList(Inst, 4, 0, 0, ListContainsBase) && !isThumbTwo()) 5495 return Error(Operands[4]->getStartLoc(), 5496 "registers must be in range r0-r7"); 5497 break; 5498 } 5499 case ARM::tADDrSP: { 5500 // If the non-SP source operand and the destination operand are not the 5501 // same, we need thumb2 (for the wide encoding), or we have an error. 5502 if (!isThumbTwo() && 5503 Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 5504 return Error(Operands[4]->getStartLoc(), 5505 "source register must be the same as destination"); 5506 } 5507 break; 5508 } 5509 // Final range checking for Thumb unconditional branch instructions. 5510 case ARM::tB: 5511 if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<11, 1>()) 5512 return Error(Operands[2]->getStartLoc(), "branch target out of range"); 5513 break; 5514 case ARM::t2B: { 5515 int op = (Operands[2]->isImm()) ? 2 : 3; 5516 if (!(static_cast<ARMOperand*>(Operands[op]))->isSignedOffset<24, 1>()) 5517 return Error(Operands[op]->getStartLoc(), "branch target out of range"); 5518 break; 5519 } 5520 // Final range checking for Thumb conditional branch instructions. 5521 case ARM::tBcc: 5522 if (!(static_cast<ARMOperand*>(Operands[2]))->isSignedOffset<8, 1>()) 5523 return Error(Operands[2]->getStartLoc(), "branch target out of range"); 5524 break; 5525 case ARM::t2Bcc: { 5526 int Op = (Operands[2]->isImm()) ? 
2 : 3; 5527 if (!(static_cast<ARMOperand*>(Operands[Op]))->isSignedOffset<20, 1>()) 5528 return Error(Operands[Op]->getStartLoc(), "branch target out of range"); 5529 break; 5530 } 5531 } 5532 5533 return false; 5534 } 5535 5536 static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) { 5537 switch(Opc) { 5538 default: llvm_unreachable("unexpected opcode!"); 5539 // VST1LN 5540 case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; 5541 case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; 5542 case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; 5543 case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD; 5544 case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD; 5545 case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD; 5546 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8; 5547 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16; 5548 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32; 5549 5550 // VST2LN 5551 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 5552 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 5553 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 5554 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 5555 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 5556 5557 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 5558 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 5559 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 5560 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 5561 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 5562 5563 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8; 5564 case ARM::VST2LNdAsm_16: Spacing = 1; 
return ARM::VST2LNd16; 5565 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32; 5566 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16; 5567 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32; 5568 5569 // VST3LN 5570 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 5571 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 5572 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 5573 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD; 5574 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 5575 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 5576 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 5577 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 5578 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD; 5579 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 5580 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8; 5581 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16; 5582 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32; 5583 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16; 5584 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32; 5585 5586 // VST3 5587 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 5588 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 5589 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 5590 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 5591 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 5592 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 5593 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 5594 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 5595 case 
ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 5596 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 5597 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 5598 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 5599 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8; 5600 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16; 5601 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32; 5602 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8; 5603 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16; 5604 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32; 5605 5606 // VST4LN 5607 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 5608 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 5609 case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 5610 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD; 5611 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 5612 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 5613 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 5614 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 5615 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD; 5616 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 5617 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8; 5618 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16; 5619 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32; 5620 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16; 5621 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32; 5622 5623 // VST4 5624 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 5625 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 5626 case 
ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 5627 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 5628 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 5629 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 5630 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 5631 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 5632 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 5633 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 5634 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 5635 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 5636 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8; 5637 case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16; 5638 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32; 5639 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8; 5640 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16; 5641 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32; 5642 } 5643 } 5644 5645 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) { 5646 switch(Opc) { 5647 default: llvm_unreachable("unexpected opcode!"); 5648 // VLD1LN 5649 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 5650 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 5651 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 5652 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 5653 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 5654 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 5655 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8; 5656 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16; 5657 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32; 5658 5659 // VLD2LN 5660 
case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 5661 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 5662 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 5663 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD; 5664 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 5665 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 5666 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 5667 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 5668 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD; 5669 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 5670 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8; 5671 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16; 5672 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32; 5673 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16; 5674 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32; 5675 5676 // VLD3DUP 5677 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 5678 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 5679 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 5680 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD; 5681 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD; 5682 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 5683 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 5684 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 5685 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 5686 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD; 5687 case 
ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; 5688 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 5689 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8; 5690 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16; 5691 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32; 5692 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8; 5693 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16; 5694 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32; 5695 5696 // VLD3LN 5697 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 5698 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 5699 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 5700 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD; 5701 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 5702 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 5703 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 5704 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 5705 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD; 5706 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 5707 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8; 5708 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16; 5709 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32; 5710 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16; 5711 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32; 5712 5713 // VLD3 5714 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 5715 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 5716 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 5717 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 
2; return ARM::VLD3q8_UPD; 5718 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 5719 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 5720 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 5721 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 5722 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 5723 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; 5724 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 5725 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 5726 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8; 5727 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16; 5728 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32; 5729 case ARM::VLD3qAsm_8: Spacing = 2; return ARM::VLD3q8; 5730 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16; 5731 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32; 5732 5733 // VLD4LN 5734 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 5735 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 5736 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 5737 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD; 5738 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 5739 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 5740 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 5741 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 5742 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; 5743 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 5744 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8; 5745 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16; 5746 case ARM::VLD4LNdAsm_32: Spacing = 
1; return ARM::VLD4LNd32; 5747 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16; 5748 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32; 5749 5750 // VLD4DUP 5751 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 5752 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 5753 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 5754 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD; 5755 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD; 5756 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 5757 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 5758 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 5759 case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 5760 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD; 5761 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD; 5762 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 5763 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8; 5764 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16; 5765 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32; 5766 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8; 5767 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16; 5768 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32; 5769 5770 // VLD4 5771 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 5772 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 5773 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 5774 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 5775 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 5776 case ARM::VLD4qWB_fixed_Asm_32: Spacing 
= 2; return ARM::VLD4q32_UPD; 5777 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 5778 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 5779 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 5780 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 5781 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 5782 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 5783 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8; 5784 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16; 5785 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32; 5786 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8; 5787 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16; 5788 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32; 5789 } 5790 } 5791 5792 bool ARMAsmParser:: 5793 processInstruction(MCInst &Inst, 5794 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 5795 switch (Inst.getOpcode()) { 5796 // Alias for alternate form of 'ADR Rd, #imm' instruction. 5797 case ARM::ADDri: { 5798 if (Inst.getOperand(1).getReg() != ARM::PC || 5799 Inst.getOperand(5).getReg() != 0) 5800 return false; 5801 MCInst TmpInst; 5802 TmpInst.setOpcode(ARM::ADR); 5803 TmpInst.addOperand(Inst.getOperand(0)); 5804 TmpInst.addOperand(Inst.getOperand(2)); 5805 TmpInst.addOperand(Inst.getOperand(3)); 5806 TmpInst.addOperand(Inst.getOperand(4)); 5807 Inst = TmpInst; 5808 return true; 5809 } 5810 // Aliases for alternate PC+imm syntax of LDR instructions. 5811 case ARM::t2LDRpcrel: 5812 // Select the narrow version if the immediate will fit. 
    // Narrow (tLDRpci) only for a positive offset that fits in 8 bits and
    // when the user did not explicitly request the wide encoding with ".w".
    if (Inst.getOperand(1).getImm() > 0 &&
        Inst.getOperand(1).getImm() <= 0xff &&
        !(static_cast<ARMOperand*>(Operands[2])->isToken() &&
          static_cast<ARMOperand*>(Operands[2])->getToken() == ".w"))
      Inst.setOpcode(ARM::tLDRpci);
    else
      Inst.setOpcode(ARM::t2LDRpci);
    return true;
  case ARM::t2LDRBpcrel:
    Inst.setOpcode(ARM::t2LDRBpci);
    return true;
  case ARM::t2LDRHpcrel:
    Inst.setOpcode(ARM::t2LDRHpci);
    return true;
  case ARM::t2LDRSBpcrel:
    Inst.setOpcode(ARM::t2LDRSBpci);
    return true;
  case ARM::t2LDRSHpcrel:
    Inst.setOpcode(ARM::t2LDRSHpci);
    return true;
  // Handle NEON VST complex aliases.
  // Register-writeback single-lane stores: the real _UPD opcode wants
  // (Rn_wb, Rn, align, Rm, regs..., lane, pred).
  case ARM::VST1LNdWB_register_Asm_8:
  case ARM::VST1LNdWB_register_Asm_16:
  case ARM::VST1LNdWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb (writeback result, tied to Rn)
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6)); // second half of the predicate operand pair
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdWB_register_Asm_8:
  case ARM::VST2LNdWB_register_Asm_16:
  case ARM::VST2LNdWB_register_Asm_32:
  case ARM::VST2LNqWB_register_Asm_16:
  case ARM::VST2LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second list register, Spacing D-registers above Vd.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdWB_register_Asm_8:
  case ARM::VST3LNdWB_register_Asm_16:
  case ARM::VST3LNdWB_register_Asm_32:
  case ARM::VST3LNqWB_register_Asm_16:
  case ARM::VST3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers at Vd + Spacing and Vd + 2*Spacing.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdWB_register_Asm_8:
  case ARM::VST4LNdWB_register_Asm_16:
  case ARM::VST4LNdWB_register_Asm_32:
  case ARM::VST4LNqWB_register_Asm_16:
  case ARM::VST4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining list registers at Vd + Spacing, + 2*Spacing, + 3*Spacing.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback forms: same as above, but there is no parsed
  // Rm operand — the real instruction takes a zero register there, and the
  // predicate pair sits one slot earlier.
  case ARM::VST1LNdWB_fixed_Asm_8:
  case ARM::VST1LNdWB_fixed_Asm_16:
  case ARM::VST1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdWB_fixed_Asm_8:
  case ARM::VST2LNdWB_fixed_Asm_16:
  case ARM::VST2LNdWB_fixed_Asm_32:
  case ARM::VST2LNqWB_fixed_Asm_16:
  case ARM::VST2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3LNdWB_fixed_Asm_8:
  case ARM::VST3LNdWB_fixed_Asm_16:
  case ARM::VST3LNdWB_fixed_Asm_32:
  case ARM::VST3LNqWB_fixed_Asm_16:
  case ARM::VST3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4LNdWB_fixed_Asm_8:
  case ARM::VST4LNdWB_fixed_Asm_16:
  case ARM::VST4LNdWB_fixed_Asm_32:
  case ARM::VST4LNqWB_fixed_Asm_16:
  case ARM::VST4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Non-writeback forms: no Rn_wb result and no Rm operand at all.
  case ARM::VST1LNdAsm_8:
  case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdAsm_8:
  case ARM::VST2LNdAsm_16:
  case ARM::VST2LNdAsm_32:
  case ARM::VST2LNqAsm_16:
  case ARM::VST2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
6056 unsigned Spacing; 6057 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6058 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6059 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6060 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6061 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6062 Spacing)); 6063 TmpInst.addOperand(Inst.getOperand(1)); // lane 6064 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6065 TmpInst.addOperand(Inst.getOperand(5)); 6066 Inst = TmpInst; 6067 return true; 6068 } 6069 6070 case ARM::VST3LNdAsm_8: 6071 case ARM::VST3LNdAsm_16: 6072 case ARM::VST3LNdAsm_32: 6073 case ARM::VST3LNqAsm_16: 6074 case ARM::VST3LNqAsm_32: { 6075 MCInst TmpInst; 6076 // Shuffle the operands around so the lane index operand is in the 6077 // right place. 6078 unsigned Spacing; 6079 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6080 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6081 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6082 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6083 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6084 Spacing)); 6085 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6086 Spacing * 2)); 6087 TmpInst.addOperand(Inst.getOperand(1)); // lane 6088 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6089 TmpInst.addOperand(Inst.getOperand(5)); 6090 Inst = TmpInst; 6091 return true; 6092 } 6093 6094 case ARM::VST4LNdAsm_8: 6095 case ARM::VST4LNdAsm_16: 6096 case ARM::VST4LNdAsm_32: 6097 case ARM::VST4LNqAsm_16: 6098 case ARM::VST4LNqAsm_32: { 6099 MCInst TmpInst; 6100 // Shuffle the operands around so the lane index operand is in the 6101 // right place. 
6102 unsigned Spacing; 6103 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6104 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6105 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6106 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6107 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6108 Spacing)); 6109 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6110 Spacing * 2)); 6111 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6112 Spacing * 3)); 6113 TmpInst.addOperand(Inst.getOperand(1)); // lane 6114 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6115 TmpInst.addOperand(Inst.getOperand(5)); 6116 Inst = TmpInst; 6117 return true; 6118 } 6119 6120 // Handle NEON VLD complex aliases. 6121 case ARM::VLD1LNdWB_register_Asm_8: 6122 case ARM::VLD1LNdWB_register_Asm_16: 6123 case ARM::VLD1LNdWB_register_Asm_32: { 6124 MCInst TmpInst; 6125 // Shuffle the operands around so the lane index operand is in the 6126 // right place. 6127 unsigned Spacing; 6128 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6129 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6130 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6131 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6132 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6133 TmpInst.addOperand(Inst.getOperand(4)); // Rm 6134 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6135 TmpInst.addOperand(Inst.getOperand(1)); // lane 6136 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 6137 TmpInst.addOperand(Inst.getOperand(6)); 6138 Inst = TmpInst; 6139 return true; 6140 } 6141 6142 case ARM::VLD2LNdWB_register_Asm_8: 6143 case ARM::VLD2LNdWB_register_Asm_16: 6144 case ARM::VLD2LNdWB_register_Asm_32: 6145 case ARM::VLD2LNqWB_register_Asm_16: 6146 case ARM::VLD2LNqWB_register_Asm_32: { 6147 MCInst TmpInst; 6148 // Shuffle the operands around so the lane index operand is in the 6149 // right place. 
6150 unsigned Spacing; 6151 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6152 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6153 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6154 Spacing)); 6155 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6156 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6157 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6158 TmpInst.addOperand(Inst.getOperand(4)); // Rm 6159 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6160 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6161 Spacing)); 6162 TmpInst.addOperand(Inst.getOperand(1)); // lane 6163 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 6164 TmpInst.addOperand(Inst.getOperand(6)); 6165 Inst = TmpInst; 6166 return true; 6167 } 6168 6169 case ARM::VLD3LNdWB_register_Asm_8: 6170 case ARM::VLD3LNdWB_register_Asm_16: 6171 case ARM::VLD3LNdWB_register_Asm_32: 6172 case ARM::VLD3LNqWB_register_Asm_16: 6173 case ARM::VLD3LNqWB_register_Asm_32: { 6174 MCInst TmpInst; 6175 // Shuffle the operands around so the lane index operand is in the 6176 // right place. 
6177 unsigned Spacing; 6178 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6179 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6180 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6181 Spacing)); 6182 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6183 Spacing * 2)); 6184 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6185 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6186 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6187 TmpInst.addOperand(Inst.getOperand(4)); // Rm 6188 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6189 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6190 Spacing)); 6191 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6192 Spacing * 2)); 6193 TmpInst.addOperand(Inst.getOperand(1)); // lane 6194 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 6195 TmpInst.addOperand(Inst.getOperand(6)); 6196 Inst = TmpInst; 6197 return true; 6198 } 6199 6200 case ARM::VLD4LNdWB_register_Asm_8: 6201 case ARM::VLD4LNdWB_register_Asm_16: 6202 case ARM::VLD4LNdWB_register_Asm_32: 6203 case ARM::VLD4LNqWB_register_Asm_16: 6204 case ARM::VLD4LNqWB_register_Asm_32: { 6205 MCInst TmpInst; 6206 // Shuffle the operands around so the lane index operand is in the 6207 // right place. 
6208 unsigned Spacing; 6209 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6210 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6211 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6212 Spacing)); 6213 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6214 Spacing * 2)); 6215 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6216 Spacing * 3)); 6217 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6218 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6219 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6220 TmpInst.addOperand(Inst.getOperand(4)); // Rm 6221 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6222 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6223 Spacing)); 6224 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6225 Spacing * 2)); 6226 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6227 Spacing * 3)); 6228 TmpInst.addOperand(Inst.getOperand(1)); // lane 6229 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 6230 TmpInst.addOperand(Inst.getOperand(6)); 6231 Inst = TmpInst; 6232 return true; 6233 } 6234 6235 case ARM::VLD1LNdWB_fixed_Asm_8: 6236 case ARM::VLD1LNdWB_fixed_Asm_16: 6237 case ARM::VLD1LNdWB_fixed_Asm_32: { 6238 MCInst TmpInst; 6239 // Shuffle the operands around so the lane index operand is in the 6240 // right place. 
6241 unsigned Spacing; 6242 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6243 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6244 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6245 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6246 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6247 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6248 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6249 TmpInst.addOperand(Inst.getOperand(1)); // lane 6250 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6251 TmpInst.addOperand(Inst.getOperand(5)); 6252 Inst = TmpInst; 6253 return true; 6254 } 6255 6256 case ARM::VLD2LNdWB_fixed_Asm_8: 6257 case ARM::VLD2LNdWB_fixed_Asm_16: 6258 case ARM::VLD2LNdWB_fixed_Asm_32: 6259 case ARM::VLD2LNqWB_fixed_Asm_16: 6260 case ARM::VLD2LNqWB_fixed_Asm_32: { 6261 MCInst TmpInst; 6262 // Shuffle the operands around so the lane index operand is in the 6263 // right place. 6264 unsigned Spacing; 6265 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6266 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6267 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6268 Spacing)); 6269 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6270 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6271 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6272 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6273 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6274 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6275 Spacing)); 6276 TmpInst.addOperand(Inst.getOperand(1)); // lane 6277 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6278 TmpInst.addOperand(Inst.getOperand(5)); 6279 Inst = TmpInst; 6280 return true; 6281 } 6282 6283 case ARM::VLD3LNdWB_fixed_Asm_8: 6284 case ARM::VLD3LNdWB_fixed_Asm_16: 6285 case ARM::VLD3LNdWB_fixed_Asm_32: 6286 case ARM::VLD3LNqWB_fixed_Asm_16: 6287 case ARM::VLD3LNqWB_fixed_Asm_32: { 6288 MCInst TmpInst; 6289 // 
Shuffle the operands around so the lane index operand is in the 6290 // right place. 6291 unsigned Spacing; 6292 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6293 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6294 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6295 Spacing)); 6296 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6297 Spacing * 2)); 6298 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6299 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6300 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6301 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6302 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6303 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6304 Spacing)); 6305 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6306 Spacing * 2)); 6307 TmpInst.addOperand(Inst.getOperand(1)); // lane 6308 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6309 TmpInst.addOperand(Inst.getOperand(5)); 6310 Inst = TmpInst; 6311 return true; 6312 } 6313 6314 case ARM::VLD4LNdWB_fixed_Asm_8: 6315 case ARM::VLD4LNdWB_fixed_Asm_16: 6316 case ARM::VLD4LNdWB_fixed_Asm_32: 6317 case ARM::VLD4LNqWB_fixed_Asm_16: 6318 case ARM::VLD4LNqWB_fixed_Asm_32: { 6319 MCInst TmpInst; 6320 // Shuffle the operands around so the lane index operand is in the 6321 // right place. 
6322 unsigned Spacing; 6323 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6324 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6325 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6326 Spacing)); 6327 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6328 Spacing * 2)); 6329 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6330 Spacing * 3)); 6331 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 6332 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6333 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6334 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6335 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6336 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6337 Spacing)); 6338 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6339 Spacing * 2)); 6340 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6341 Spacing * 3)); 6342 TmpInst.addOperand(Inst.getOperand(1)); // lane 6343 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6344 TmpInst.addOperand(Inst.getOperand(5)); 6345 Inst = TmpInst; 6346 return true; 6347 } 6348 6349 case ARM::VLD1LNdAsm_8: 6350 case ARM::VLD1LNdAsm_16: 6351 case ARM::VLD1LNdAsm_32: { 6352 MCInst TmpInst; 6353 // Shuffle the operands around so the lane index operand is in the 6354 // right place. 
6355 unsigned Spacing; 6356 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6357 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6358 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6359 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6360 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6361 TmpInst.addOperand(Inst.getOperand(1)); // lane 6362 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6363 TmpInst.addOperand(Inst.getOperand(5)); 6364 Inst = TmpInst; 6365 return true; 6366 } 6367 6368 case ARM::VLD2LNdAsm_8: 6369 case ARM::VLD2LNdAsm_16: 6370 case ARM::VLD2LNdAsm_32: 6371 case ARM::VLD2LNqAsm_16: 6372 case ARM::VLD2LNqAsm_32: { 6373 MCInst TmpInst; 6374 // Shuffle the operands around so the lane index operand is in the 6375 // right place. 6376 unsigned Spacing; 6377 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6378 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6379 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6380 Spacing)); 6381 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6382 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6383 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6384 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6385 Spacing)); 6386 TmpInst.addOperand(Inst.getOperand(1)); // lane 6387 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6388 TmpInst.addOperand(Inst.getOperand(5)); 6389 Inst = TmpInst; 6390 return true; 6391 } 6392 6393 case ARM::VLD3LNdAsm_8: 6394 case ARM::VLD3LNdAsm_16: 6395 case ARM::VLD3LNdAsm_32: 6396 case ARM::VLD3LNqAsm_16: 6397 case ARM::VLD3LNqAsm_32: { 6398 MCInst TmpInst; 6399 // Shuffle the operands around so the lane index operand is in the 6400 // right place. 
6401 unsigned Spacing; 6402 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6403 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6404 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6405 Spacing)); 6406 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6407 Spacing * 2)); 6408 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6409 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6410 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6411 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6412 Spacing)); 6413 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6414 Spacing * 2)); 6415 TmpInst.addOperand(Inst.getOperand(1)); // lane 6416 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6417 TmpInst.addOperand(Inst.getOperand(5)); 6418 Inst = TmpInst; 6419 return true; 6420 } 6421 6422 case ARM::VLD4LNdAsm_8: 6423 case ARM::VLD4LNdAsm_16: 6424 case ARM::VLD4LNdAsm_32: 6425 case ARM::VLD4LNqAsm_16: 6426 case ARM::VLD4LNqAsm_32: { 6427 MCInst TmpInst; 6428 // Shuffle the operands around so the lane index operand is in the 6429 // right place. 
6430 unsigned Spacing; 6431 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6432 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6433 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6434 Spacing)); 6435 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6436 Spacing * 2)); 6437 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6438 Spacing * 3)); 6439 TmpInst.addOperand(Inst.getOperand(2)); // Rn 6440 TmpInst.addOperand(Inst.getOperand(3)); // alignment 6441 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 6442 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6443 Spacing)); 6444 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6445 Spacing * 2)); 6446 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6447 Spacing * 3)); 6448 TmpInst.addOperand(Inst.getOperand(1)); // lane 6449 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6450 TmpInst.addOperand(Inst.getOperand(5)); 6451 Inst = TmpInst; 6452 return true; 6453 } 6454 6455 // VLD3DUP single 3-element structure to all lanes instructions. 
6456 case ARM::VLD3DUPdAsm_8: 6457 case ARM::VLD3DUPdAsm_16: 6458 case ARM::VLD3DUPdAsm_32: 6459 case ARM::VLD3DUPqAsm_8: 6460 case ARM::VLD3DUPqAsm_16: 6461 case ARM::VLD3DUPqAsm_32: { 6462 MCInst TmpInst; 6463 unsigned Spacing; 6464 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6465 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6466 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6467 Spacing)); 6468 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6469 Spacing * 2)); 6470 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6471 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6472 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6473 TmpInst.addOperand(Inst.getOperand(4)); 6474 Inst = TmpInst; 6475 return true; 6476 } 6477 6478 case ARM::VLD3DUPdWB_fixed_Asm_8: 6479 case ARM::VLD3DUPdWB_fixed_Asm_16: 6480 case ARM::VLD3DUPdWB_fixed_Asm_32: 6481 case ARM::VLD3DUPqWB_fixed_Asm_8: 6482 case ARM::VLD3DUPqWB_fixed_Asm_16: 6483 case ARM::VLD3DUPqWB_fixed_Asm_32: { 6484 MCInst TmpInst; 6485 unsigned Spacing; 6486 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6487 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6488 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6489 Spacing)); 6490 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6491 Spacing * 2)); 6492 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6493 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6494 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6495 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6496 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6497 TmpInst.addOperand(Inst.getOperand(4)); 6498 Inst = TmpInst; 6499 return true; 6500 } 6501 6502 case ARM::VLD3DUPdWB_register_Asm_8: 6503 case ARM::VLD3DUPdWB_register_Asm_16: 6504 case ARM::VLD3DUPdWB_register_Asm_32: 6505 case ARM::VLD3DUPqWB_register_Asm_8: 6506 case ARM::VLD3DUPqWB_register_Asm_16: 6507 case 
ARM::VLD3DUPqWB_register_Asm_32: { 6508 MCInst TmpInst; 6509 unsigned Spacing; 6510 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6511 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6512 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6513 Spacing)); 6514 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6515 Spacing * 2)); 6516 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6517 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6518 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6519 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6520 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6521 TmpInst.addOperand(Inst.getOperand(5)); 6522 Inst = TmpInst; 6523 return true; 6524 } 6525 6526 // VLD3 multiple 3-element structure instructions. 6527 case ARM::VLD3dAsm_8: 6528 case ARM::VLD3dAsm_16: 6529 case ARM::VLD3dAsm_32: 6530 case ARM::VLD3qAsm_8: 6531 case ARM::VLD3qAsm_16: 6532 case ARM::VLD3qAsm_32: { 6533 MCInst TmpInst; 6534 unsigned Spacing; 6535 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6536 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6537 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6538 Spacing)); 6539 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6540 Spacing * 2)); 6541 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6542 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6543 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6544 TmpInst.addOperand(Inst.getOperand(4)); 6545 Inst = TmpInst; 6546 return true; 6547 } 6548 6549 case ARM::VLD3dWB_fixed_Asm_8: 6550 case ARM::VLD3dWB_fixed_Asm_16: 6551 case ARM::VLD3dWB_fixed_Asm_32: 6552 case ARM::VLD3qWB_fixed_Asm_8: 6553 case ARM::VLD3qWB_fixed_Asm_16: 6554 case ARM::VLD3qWB_fixed_Asm_32: { 6555 MCInst TmpInst; 6556 unsigned Spacing; 6557 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6558 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6559 
TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6560 Spacing)); 6561 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6562 Spacing * 2)); 6563 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6564 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6565 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6566 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6567 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6568 TmpInst.addOperand(Inst.getOperand(4)); 6569 Inst = TmpInst; 6570 return true; 6571 } 6572 6573 case ARM::VLD3dWB_register_Asm_8: 6574 case ARM::VLD3dWB_register_Asm_16: 6575 case ARM::VLD3dWB_register_Asm_32: 6576 case ARM::VLD3qWB_register_Asm_8: 6577 case ARM::VLD3qWB_register_Asm_16: 6578 case ARM::VLD3qWB_register_Asm_32: { 6579 MCInst TmpInst; 6580 unsigned Spacing; 6581 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6582 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6583 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6584 Spacing)); 6585 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6586 Spacing * 2)); 6587 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6588 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6589 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6590 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6591 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6592 TmpInst.addOperand(Inst.getOperand(5)); 6593 Inst = TmpInst; 6594 return true; 6595 } 6596 6597 // VLD4DUP single 4-element structure to all lanes instructions. 
6598 case ARM::VLD4DUPdAsm_8: 6599 case ARM::VLD4DUPdAsm_16: 6600 case ARM::VLD4DUPdAsm_32: 6601 case ARM::VLD4DUPqAsm_8: 6602 case ARM::VLD4DUPqAsm_16: 6603 case ARM::VLD4DUPqAsm_32: { 6604 MCInst TmpInst; 6605 unsigned Spacing; 6606 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6607 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6608 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6609 Spacing)); 6610 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6611 Spacing * 2)); 6612 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6613 Spacing * 3)); 6614 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6615 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6616 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6617 TmpInst.addOperand(Inst.getOperand(4)); 6618 Inst = TmpInst; 6619 return true; 6620 } 6621 6622 case ARM::VLD4DUPdWB_fixed_Asm_8: 6623 case ARM::VLD4DUPdWB_fixed_Asm_16: 6624 case ARM::VLD4DUPdWB_fixed_Asm_32: 6625 case ARM::VLD4DUPqWB_fixed_Asm_8: 6626 case ARM::VLD4DUPqWB_fixed_Asm_16: 6627 case ARM::VLD4DUPqWB_fixed_Asm_32: { 6628 MCInst TmpInst; 6629 unsigned Spacing; 6630 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6631 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6632 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6633 Spacing)); 6634 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6635 Spacing * 2)); 6636 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6637 Spacing * 3)); 6638 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6639 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6640 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6641 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6642 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6643 TmpInst.addOperand(Inst.getOperand(4)); 6644 Inst = TmpInst; 6645 return true; 6646 } 6647 6648 case 
ARM::VLD4DUPdWB_register_Asm_8: 6649 case ARM::VLD4DUPdWB_register_Asm_16: 6650 case ARM::VLD4DUPdWB_register_Asm_32: 6651 case ARM::VLD4DUPqWB_register_Asm_8: 6652 case ARM::VLD4DUPqWB_register_Asm_16: 6653 case ARM::VLD4DUPqWB_register_Asm_32: { 6654 MCInst TmpInst; 6655 unsigned Spacing; 6656 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6657 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6658 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6659 Spacing)); 6660 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6661 Spacing * 2)); 6662 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6663 Spacing * 3)); 6664 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6665 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6666 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6667 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6668 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6669 TmpInst.addOperand(Inst.getOperand(5)); 6670 Inst = TmpInst; 6671 return true; 6672 } 6673 6674 // VLD4 multiple 4-element structure instructions. 
6675 case ARM::VLD4dAsm_8: 6676 case ARM::VLD4dAsm_16: 6677 case ARM::VLD4dAsm_32: 6678 case ARM::VLD4qAsm_8: 6679 case ARM::VLD4qAsm_16: 6680 case ARM::VLD4qAsm_32: { 6681 MCInst TmpInst; 6682 unsigned Spacing; 6683 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6684 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6685 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6686 Spacing)); 6687 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6688 Spacing * 2)); 6689 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6690 Spacing * 3)); 6691 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6692 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6693 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6694 TmpInst.addOperand(Inst.getOperand(4)); 6695 Inst = TmpInst; 6696 return true; 6697 } 6698 6699 case ARM::VLD4dWB_fixed_Asm_8: 6700 case ARM::VLD4dWB_fixed_Asm_16: 6701 case ARM::VLD4dWB_fixed_Asm_32: 6702 case ARM::VLD4qWB_fixed_Asm_8: 6703 case ARM::VLD4qWB_fixed_Asm_16: 6704 case ARM::VLD4qWB_fixed_Asm_32: { 6705 MCInst TmpInst; 6706 unsigned Spacing; 6707 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6708 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6709 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6710 Spacing)); 6711 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6712 Spacing * 2)); 6713 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6714 Spacing * 3)); 6715 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6716 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6717 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6718 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6719 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6720 TmpInst.addOperand(Inst.getOperand(4)); 6721 Inst = TmpInst; 6722 return true; 6723 } 6724 6725 case ARM::VLD4dWB_register_Asm_8: 6726 case 
ARM::VLD4dWB_register_Asm_16: 6727 case ARM::VLD4dWB_register_Asm_32: 6728 case ARM::VLD4qWB_register_Asm_8: 6729 case ARM::VLD4qWB_register_Asm_16: 6730 case ARM::VLD4qWB_register_Asm_32: { 6731 MCInst TmpInst; 6732 unsigned Spacing; 6733 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 6734 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6735 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6736 Spacing)); 6737 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6738 Spacing * 2)); 6739 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6740 Spacing * 3)); 6741 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6742 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6743 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6744 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6745 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6746 TmpInst.addOperand(Inst.getOperand(5)); 6747 Inst = TmpInst; 6748 return true; 6749 } 6750 6751 // VST3 multiple 3-element structure instructions. 
6752 case ARM::VST3dAsm_8: 6753 case ARM::VST3dAsm_16: 6754 case ARM::VST3dAsm_32: 6755 case ARM::VST3qAsm_8: 6756 case ARM::VST3qAsm_16: 6757 case ARM::VST3qAsm_32: { 6758 MCInst TmpInst; 6759 unsigned Spacing; 6760 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6761 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6762 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6763 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6764 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6765 Spacing)); 6766 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6767 Spacing * 2)); 6768 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6769 TmpInst.addOperand(Inst.getOperand(4)); 6770 Inst = TmpInst; 6771 return true; 6772 } 6773 6774 case ARM::VST3dWB_fixed_Asm_8: 6775 case ARM::VST3dWB_fixed_Asm_16: 6776 case ARM::VST3dWB_fixed_Asm_32: 6777 case ARM::VST3qWB_fixed_Asm_8: 6778 case ARM::VST3qWB_fixed_Asm_16: 6779 case ARM::VST3qWB_fixed_Asm_32: { 6780 MCInst TmpInst; 6781 unsigned Spacing; 6782 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6783 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6784 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6785 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6786 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6787 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6788 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6789 Spacing)); 6790 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6791 Spacing * 2)); 6792 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6793 TmpInst.addOperand(Inst.getOperand(4)); 6794 Inst = TmpInst; 6795 return true; 6796 } 6797 6798 case ARM::VST3dWB_register_Asm_8: 6799 case ARM::VST3dWB_register_Asm_16: 6800 case ARM::VST3dWB_register_Asm_32: 6801 case ARM::VST3qWB_register_Asm_8: 6802 case ARM::VST3qWB_register_Asm_16: 6803 case ARM::VST3qWB_register_Asm_32: { 6804 MCInst TmpInst; 
6805 unsigned Spacing; 6806 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6807 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6808 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6809 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6810 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6811 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6812 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6813 Spacing)); 6814 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6815 Spacing * 2)); 6816 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6817 TmpInst.addOperand(Inst.getOperand(5)); 6818 Inst = TmpInst; 6819 return true; 6820 } 6821 6822 // VST4 multiple 3-element structure instructions. 6823 case ARM::VST4dAsm_8: 6824 case ARM::VST4dAsm_16: 6825 case ARM::VST4dAsm_32: 6826 case ARM::VST4qAsm_8: 6827 case ARM::VST4qAsm_16: 6828 case ARM::VST4qAsm_32: { 6829 MCInst TmpInst; 6830 unsigned Spacing; 6831 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6832 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6833 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6834 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6835 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6836 Spacing)); 6837 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6838 Spacing * 2)); 6839 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6840 Spacing * 3)); 6841 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6842 TmpInst.addOperand(Inst.getOperand(4)); 6843 Inst = TmpInst; 6844 return true; 6845 } 6846 6847 case ARM::VST4dWB_fixed_Asm_8: 6848 case ARM::VST4dWB_fixed_Asm_16: 6849 case ARM::VST4dWB_fixed_Asm_32: 6850 case ARM::VST4qWB_fixed_Asm_8: 6851 case ARM::VST4qWB_fixed_Asm_16: 6852 case ARM::VST4qWB_fixed_Asm_32: { 6853 MCInst TmpInst; 6854 unsigned Spacing; 6855 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6856 
TmpInst.addOperand(Inst.getOperand(1)); // Rn 6857 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6858 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6859 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 6860 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6861 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6862 Spacing)); 6863 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6864 Spacing * 2)); 6865 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6866 Spacing * 3)); 6867 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6868 TmpInst.addOperand(Inst.getOperand(4)); 6869 Inst = TmpInst; 6870 return true; 6871 } 6872 6873 case ARM::VST4dWB_register_Asm_8: 6874 case ARM::VST4dWB_register_Asm_16: 6875 case ARM::VST4dWB_register_Asm_32: 6876 case ARM::VST4qWB_register_Asm_8: 6877 case ARM::VST4qWB_register_Asm_16: 6878 case ARM::VST4qWB_register_Asm_32: { 6879 MCInst TmpInst; 6880 unsigned Spacing; 6881 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 6882 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6883 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 6884 TmpInst.addOperand(Inst.getOperand(2)); // alignment 6885 TmpInst.addOperand(Inst.getOperand(3)); // Rm 6886 TmpInst.addOperand(Inst.getOperand(0)); // Vd 6887 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6888 Spacing)); 6889 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6890 Spacing * 2)); 6891 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 6892 Spacing * 3)); 6893 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6894 TmpInst.addOperand(Inst.getOperand(5)); 6895 Inst = TmpInst; 6896 return true; 6897 } 6898 6899 // Handle encoding choice for the shift-immediate instructions. 
6900 case ARM::t2LSLri: 6901 case ARM::t2LSRri: 6902 case ARM::t2ASRri: { 6903 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6904 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 6905 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && 6906 !(static_cast<ARMOperand*>(Operands[3])->isToken() && 6907 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) { 6908 unsigned NewOpc; 6909 switch (Inst.getOpcode()) { 6910 default: llvm_unreachable("unexpected opcode"); 6911 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break; 6912 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break; 6913 case ARM::t2ASRri: NewOpc = ARM::tASRri; break; 6914 } 6915 // The Thumb1 operands aren't in the same order. Awesome, eh? 6916 MCInst TmpInst; 6917 TmpInst.setOpcode(NewOpc); 6918 TmpInst.addOperand(Inst.getOperand(0)); 6919 TmpInst.addOperand(Inst.getOperand(5)); 6920 TmpInst.addOperand(Inst.getOperand(1)); 6921 TmpInst.addOperand(Inst.getOperand(2)); 6922 TmpInst.addOperand(Inst.getOperand(3)); 6923 TmpInst.addOperand(Inst.getOperand(4)); 6924 Inst = TmpInst; 6925 return true; 6926 } 6927 return false; 6928 } 6929 6930 // Handle the Thumb2 mode MOV complex aliases. 6931 case ARM::t2MOVsr: 6932 case ARM::t2MOVSsr: { 6933 // Which instruction to expand to depends on the CCOut operand and 6934 // whether we're in an IT block if the register operands are low 6935 // registers. 6936 bool isNarrow = false; 6937 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6938 isARMLowRegister(Inst.getOperand(1).getReg()) && 6939 isARMLowRegister(Inst.getOperand(2).getReg()) && 6940 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 6941 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr)) 6942 isNarrow = true; 6943 MCInst TmpInst; 6944 unsigned newOpc; 6945 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { 6946 default: llvm_unreachable("unexpected opcode!"); 6947 case ARM_AM::asr: newOpc = isNarrow ? 
ARM::tASRrr : ARM::t2ASRrr; break; 6948 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break; 6949 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; 6950 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; 6951 } 6952 TmpInst.setOpcode(newOpc); 6953 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6954 if (isNarrow) 6955 TmpInst.addOperand(MCOperand::CreateReg( 6956 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 6957 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6958 TmpInst.addOperand(Inst.getOperand(2)); // Rm 6959 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6960 TmpInst.addOperand(Inst.getOperand(5)); 6961 if (!isNarrow) 6962 TmpInst.addOperand(MCOperand::CreateReg( 6963 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 6964 Inst = TmpInst; 6965 return true; 6966 } 6967 case ARM::t2MOVsi: 6968 case ARM::t2MOVSsi: { 6969 // Which instruction to expand to depends on the CCOut operand and 6970 // whether we're in an IT block if the register operands are low 6971 // registers. 6972 bool isNarrow = false; 6973 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6974 isARMLowRegister(Inst.getOperand(1).getReg()) && 6975 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi)) 6976 isNarrow = true; 6977 MCInst TmpInst; 6978 unsigned newOpc; 6979 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) { 6980 default: llvm_unreachable("unexpected opcode!"); 6981 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; 6982 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; 6983 case ARM_AM::lsl: newOpc = isNarrow ? 
ARM::tLSLri : ARM::t2LSLri; break; 6984 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; 6985 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; 6986 } 6987 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); 6988 if (Amount == 32) Amount = 0; 6989 TmpInst.setOpcode(newOpc); 6990 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6991 if (isNarrow) 6992 TmpInst.addOperand(MCOperand::CreateReg( 6993 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 6994 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6995 if (newOpc != ARM::t2RRX) 6996 TmpInst.addOperand(MCOperand::CreateImm(Amount)); 6997 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6998 TmpInst.addOperand(Inst.getOperand(4)); 6999 if (!isNarrow) 7000 TmpInst.addOperand(MCOperand::CreateReg( 7001 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 7002 Inst = TmpInst; 7003 return true; 7004 } 7005 // Handle the ARM mode MOV complex aliases. 7006 case ARM::ASRr: 7007 case ARM::LSRr: 7008 case ARM::LSLr: 7009 case ARM::RORr: { 7010 ARM_AM::ShiftOpc ShiftTy; 7011 switch(Inst.getOpcode()) { 7012 default: llvm_unreachable("unexpected opcode!"); 7013 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 7014 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 7015 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 7016 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 7017 } 7018 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 7019 MCInst TmpInst; 7020 TmpInst.setOpcode(ARM::MOVsr); 7021 TmpInst.addOperand(Inst.getOperand(0)); // Rd 7022 TmpInst.addOperand(Inst.getOperand(1)); // Rn 7023 TmpInst.addOperand(Inst.getOperand(2)); // Rm 7024 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 7025 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 7026 TmpInst.addOperand(Inst.getOperand(4)); 7027 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 7028 Inst = TmpInst; 7029 return true; 7030 } 7031 case ARM::ASRi: 7032 case ARM::LSRi: 7033 case ARM::LSLi: 7034 case 
ARM::RORi: { 7035 ARM_AM::ShiftOpc ShiftTy; 7036 switch(Inst.getOpcode()) { 7037 default: llvm_unreachable("unexpected opcode!"); 7038 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 7039 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 7040 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 7041 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 7042 } 7043 // A shift by zero is a plain MOVr, not a MOVsi. 7044 unsigned Amt = Inst.getOperand(2).getImm(); 7045 unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi; 7046 // A shift by 32 should be encoded as 0 when permitted 7047 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr)) 7048 Amt = 0; 7049 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 7050 MCInst TmpInst; 7051 TmpInst.setOpcode(Opc); 7052 TmpInst.addOperand(Inst.getOperand(0)); // Rd 7053 TmpInst.addOperand(Inst.getOperand(1)); // Rn 7054 if (Opc == ARM::MOVsi) 7055 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 7056 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 7057 TmpInst.addOperand(Inst.getOperand(4)); 7058 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 7059 Inst = TmpInst; 7060 return true; 7061 } 7062 case ARM::RRXi: { 7063 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 7064 MCInst TmpInst; 7065 TmpInst.setOpcode(ARM::MOVsi); 7066 TmpInst.addOperand(Inst.getOperand(0)); // Rd 7067 TmpInst.addOperand(Inst.getOperand(1)); // Rn 7068 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 7069 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7070 TmpInst.addOperand(Inst.getOperand(3)); 7071 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 7072 Inst = TmpInst; 7073 return true; 7074 } 7075 case ARM::t2LDMIA_UPD: { 7076 // If this is a load of a single register, then we should use 7077 // a post-indexed LDR instruction instead, per the ARM ARM. 
7078 if (Inst.getNumOperands() != 5) 7079 return false; 7080 MCInst TmpInst; 7081 TmpInst.setOpcode(ARM::t2LDR_POST); 7082 TmpInst.addOperand(Inst.getOperand(4)); // Rt 7083 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 7084 TmpInst.addOperand(Inst.getOperand(1)); // Rn 7085 TmpInst.addOperand(MCOperand::CreateImm(4)); 7086 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7087 TmpInst.addOperand(Inst.getOperand(3)); 7088 Inst = TmpInst; 7089 return true; 7090 } 7091 case ARM::t2STMDB_UPD: { 7092 // If this is a store of a single register, then we should use 7093 // a pre-indexed STR instruction instead, per the ARM ARM. 7094 if (Inst.getNumOperands() != 5) 7095 return false; 7096 MCInst TmpInst; 7097 TmpInst.setOpcode(ARM::t2STR_PRE); 7098 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 7099 TmpInst.addOperand(Inst.getOperand(4)); // Rt 7100 TmpInst.addOperand(Inst.getOperand(1)); // Rn 7101 TmpInst.addOperand(MCOperand::CreateImm(-4)); 7102 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7103 TmpInst.addOperand(Inst.getOperand(3)); 7104 Inst = TmpInst; 7105 return true; 7106 } 7107 case ARM::LDMIA_UPD: 7108 // If this is a load of a single register via a 'pop', then we should use 7109 // a post-indexed LDR instruction instead, per the ARM ARM. 
7110 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 7111 Inst.getNumOperands() == 5) { 7112 MCInst TmpInst; 7113 TmpInst.setOpcode(ARM::LDR_POST_IMM); 7114 TmpInst.addOperand(Inst.getOperand(4)); // Rt 7115 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 7116 TmpInst.addOperand(Inst.getOperand(1)); // Rn 7117 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 7118 TmpInst.addOperand(MCOperand::CreateImm(4)); 7119 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7120 TmpInst.addOperand(Inst.getOperand(3)); 7121 Inst = TmpInst; 7122 return true; 7123 } 7124 break; 7125 case ARM::STMDB_UPD: 7126 // If this is a store of a single register via a 'push', then we should use 7127 // a pre-indexed STR instruction instead, per the ARM ARM. 7128 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 7129 Inst.getNumOperands() == 5) { 7130 MCInst TmpInst; 7131 TmpInst.setOpcode(ARM::STR_PRE_IMM); 7132 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 7133 TmpInst.addOperand(Inst.getOperand(4)); // Rt 7134 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 7135 TmpInst.addOperand(MCOperand::CreateImm(-4)); 7136 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7137 TmpInst.addOperand(Inst.getOperand(3)); 7138 Inst = TmpInst; 7139 } 7140 break; 7141 case ARM::t2ADDri12: 7142 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" 7143 // mnemonic was used (not "addw"), encoding T3 is preferred. 7144 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" || 7145 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 7146 break; 7147 Inst.setOpcode(ARM::t2ADDri); 7148 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 7149 break; 7150 case ARM::t2SUBri12: 7151 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" 7152 // mnemonic was used (not "subw"), encoding T3 is preferred. 
7153 if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" || 7154 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 7155 break; 7156 Inst.setOpcode(ARM::t2SUBri); 7157 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 7158 break; 7159 case ARM::tADDi8: 7160 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 7161 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 7162 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 7163 // to encoding T1 if <Rd> is omitted." 7164 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 7165 Inst.setOpcode(ARM::tADDi3); 7166 return true; 7167 } 7168 break; 7169 case ARM::tSUBi8: 7170 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 7171 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 7172 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 7173 // to encoding T1 if <Rd> is omitted." 7174 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 7175 Inst.setOpcode(ARM::tSUBi3); 7176 return true; 7177 } 7178 break; 7179 case ARM::t2ADDri: 7180 case ARM::t2SUBri: { 7181 // If the destination and first source operand are the same, and 7182 // the flags are compatible with the current IT status, use encoding T2 7183 // instead of T3. For compatibility with the system 'as'. Make sure the 7184 // wide encoding wasn't explicit. 7185 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 7186 !isARMLowRegister(Inst.getOperand(0).getReg()) || 7187 (unsigned)Inst.getOperand(2).getImm() > 255 || 7188 ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) || 7189 (inITBlock() && Inst.getOperand(5).getReg() != 0)) || 7190 (static_cast<ARMOperand*>(Operands[3])->isToken() && 7191 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) 7192 break; 7193 MCInst TmpInst; 7194 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? 
7195 ARM::tADDi8 : ARM::tSUBi8); 7196 TmpInst.addOperand(Inst.getOperand(0)); 7197 TmpInst.addOperand(Inst.getOperand(5)); 7198 TmpInst.addOperand(Inst.getOperand(0)); 7199 TmpInst.addOperand(Inst.getOperand(2)); 7200 TmpInst.addOperand(Inst.getOperand(3)); 7201 TmpInst.addOperand(Inst.getOperand(4)); 7202 Inst = TmpInst; 7203 return true; 7204 } 7205 case ARM::t2ADDrr: { 7206 // If the destination and first source operand are the same, and 7207 // there's no setting of the flags, use encoding T2 instead of T3. 7208 // Note that this is only for ADD, not SUB. This mirrors the system 7209 // 'as' behaviour. Make sure the wide encoding wasn't explicit. 7210 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 7211 Inst.getOperand(5).getReg() != 0 || 7212 (static_cast<ARMOperand*>(Operands[3])->isToken() && 7213 static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) 7214 break; 7215 MCInst TmpInst; 7216 TmpInst.setOpcode(ARM::tADDhirr); 7217 TmpInst.addOperand(Inst.getOperand(0)); 7218 TmpInst.addOperand(Inst.getOperand(0)); 7219 TmpInst.addOperand(Inst.getOperand(2)); 7220 TmpInst.addOperand(Inst.getOperand(3)); 7221 TmpInst.addOperand(Inst.getOperand(4)); 7222 Inst = TmpInst; 7223 return true; 7224 } 7225 case ARM::tADDrSP: { 7226 // If the non-SP source operand and the destination operand are not the 7227 // same, we need to use the 32-bit encoding if it's available. 7228 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 7229 Inst.setOpcode(ARM::t2ADDrr); 7230 Inst.addOperand(MCOperand::CreateReg(0)); // cc_out 7231 return true; 7232 } 7233 break; 7234 } 7235 case ARM::tB: 7236 // A Thumb conditional branch outside of an IT block is a tBcc. 7237 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 7238 Inst.setOpcode(ARM::tBcc); 7239 return true; 7240 } 7241 break; 7242 case ARM::t2B: 7243 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 
7244 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 7245 Inst.setOpcode(ARM::t2Bcc); 7246 return true; 7247 } 7248 break; 7249 case ARM::t2Bcc: 7250 // If the conditional is AL or we're in an IT block, we really want t2B. 7251 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 7252 Inst.setOpcode(ARM::t2B); 7253 return true; 7254 } 7255 break; 7256 case ARM::tBcc: 7257 // If the conditional is AL, we really want tB. 7258 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 7259 Inst.setOpcode(ARM::tB); 7260 return true; 7261 } 7262 break; 7263 case ARM::tLDMIA: { 7264 // If the register list contains any high registers, or if the writeback 7265 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 7266 // instead if we're in Thumb2. Otherwise, this should have generated 7267 // an error in validateInstruction(). 7268 unsigned Rn = Inst.getOperand(0).getReg(); 7269 bool hasWritebackToken = 7270 (static_cast<ARMOperand*>(Operands[3])->isToken() && 7271 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 7272 bool listContainsBase; 7273 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 7274 (!listContainsBase && !hasWritebackToken) || 7275 (listContainsBase && hasWritebackToken)) { 7276 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 7277 assert (isThumbTwo()); 7278 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 7279 // If we're switching to the updating version, we need to insert 7280 // the writeback tied operand. 7281 if (hasWritebackToken) 7282 Inst.insert(Inst.begin(), 7283 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 7284 return true; 7285 } 7286 break; 7287 } 7288 case ARM::tSTMIA_UPD: { 7289 // If the register list contains any high registers, we need to use 7290 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 7291 // should have generated an error in validateInstruction(). 
7292 unsigned Rn = Inst.getOperand(0).getReg(); 7293 bool listContainsBase; 7294 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 7295 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 7296 assert (isThumbTwo()); 7297 Inst.setOpcode(ARM::t2STMIA_UPD); 7298 return true; 7299 } 7300 break; 7301 } 7302 case ARM::tPOP: { 7303 bool listContainsBase; 7304 // If the register list contains any high registers, we need to use 7305 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 7306 // should have generated an error in validateInstruction(). 7307 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 7308 return false; 7309 assert (isThumbTwo()); 7310 Inst.setOpcode(ARM::t2LDMIA_UPD); 7311 // Add the base register and writeback operands. 7312 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7313 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7314 return true; 7315 } 7316 case ARM::tPUSH: { 7317 bool listContainsBase; 7318 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 7319 return false; 7320 assert (isThumbTwo()); 7321 Inst.setOpcode(ARM::t2STMDB_UPD); 7322 // Add the base register and writeback operands. 7323 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7324 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 7325 return true; 7326 } 7327 case ARM::t2MOVi: { 7328 // If we can use the 16-bit encoding and the user didn't explicitly 7329 // request the 32-bit variant, transform it here. 7330 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 7331 (unsigned)Inst.getOperand(1).getImm() <= 255 && 7332 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 7333 Inst.getOperand(4).getReg() == ARM::CPSR) || 7334 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 7335 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 7336 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 7337 // The operands aren't in the same order for tMOVi8... 
7338 MCInst TmpInst; 7339 TmpInst.setOpcode(ARM::tMOVi8); 7340 TmpInst.addOperand(Inst.getOperand(0)); 7341 TmpInst.addOperand(Inst.getOperand(4)); 7342 TmpInst.addOperand(Inst.getOperand(1)); 7343 TmpInst.addOperand(Inst.getOperand(2)); 7344 TmpInst.addOperand(Inst.getOperand(3)); 7345 Inst = TmpInst; 7346 return true; 7347 } 7348 break; 7349 } 7350 case ARM::t2MOVr: { 7351 // If we can use the 16-bit encoding and the user didn't explicitly 7352 // request the 32-bit variant, transform it here. 7353 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 7354 isARMLowRegister(Inst.getOperand(1).getReg()) && 7355 Inst.getOperand(2).getImm() == ARMCC::AL && 7356 Inst.getOperand(4).getReg() == ARM::CPSR && 7357 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 7358 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 7359 // The operands aren't the same for tMOV[S]r... (no cc_out) 7360 MCInst TmpInst; 7361 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 7362 TmpInst.addOperand(Inst.getOperand(0)); 7363 TmpInst.addOperand(Inst.getOperand(1)); 7364 TmpInst.addOperand(Inst.getOperand(2)); 7365 TmpInst.addOperand(Inst.getOperand(3)); 7366 Inst = TmpInst; 7367 return true; 7368 } 7369 break; 7370 } 7371 case ARM::t2SXTH: 7372 case ARM::t2SXTB: 7373 case ARM::t2UXTH: 7374 case ARM::t2UXTB: { 7375 // If we can use the 16-bit encoding and the user didn't explicitly 7376 // request the 32-bit variant, transform it here. 
7377 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 7378 isARMLowRegister(Inst.getOperand(1).getReg()) && 7379 Inst.getOperand(2).getImm() == 0 && 7380 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 7381 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 7382 unsigned NewOpc; 7383 switch (Inst.getOpcode()) { 7384 default: llvm_unreachable("Illegal opcode!"); 7385 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 7386 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 7387 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 7388 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 7389 } 7390 // The operands aren't the same for thumb1 (no rotate operand). 7391 MCInst TmpInst; 7392 TmpInst.setOpcode(NewOpc); 7393 TmpInst.addOperand(Inst.getOperand(0)); 7394 TmpInst.addOperand(Inst.getOperand(1)); 7395 TmpInst.addOperand(Inst.getOperand(3)); 7396 TmpInst.addOperand(Inst.getOperand(4)); 7397 Inst = TmpInst; 7398 return true; 7399 } 7400 break; 7401 } 7402 case ARM::MOVsi: { 7403 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); 7404 // rrx shifts and asr/lsr of #32 is encoded as 0 7405 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr) 7406 return false; 7407 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) { 7408 // Shifting by zero is accepted as a vanilla 'MOVr' 7409 MCInst TmpInst; 7410 TmpInst.setOpcode(ARM::MOVr); 7411 TmpInst.addOperand(Inst.getOperand(0)); 7412 TmpInst.addOperand(Inst.getOperand(1)); 7413 TmpInst.addOperand(Inst.getOperand(3)); 7414 TmpInst.addOperand(Inst.getOperand(4)); 7415 TmpInst.addOperand(Inst.getOperand(5)); 7416 Inst = TmpInst; 7417 return true; 7418 } 7419 return false; 7420 } 7421 case ARM::ANDrsi: 7422 case ARM::ORRrsi: 7423 case ARM::EORrsi: 7424 case ARM::BICrsi: 7425 case ARM::SUBrsi: 7426 case ARM::ADDrsi: { 7427 unsigned newOpc; 7428 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm()); 7429 if (SOpc == ARM_AM::rrx) return false; 7430 switch 
(Inst.getOpcode()) { 7431 default: llvm_unreachable("unexpected opcode!"); 7432 case ARM::ANDrsi: newOpc = ARM::ANDrr; break; 7433 case ARM::ORRrsi: newOpc = ARM::ORRrr; break; 7434 case ARM::EORrsi: newOpc = ARM::EORrr; break; 7435 case ARM::BICrsi: newOpc = ARM::BICrr; break; 7436 case ARM::SUBrsi: newOpc = ARM::SUBrr; break; 7437 case ARM::ADDrsi: newOpc = ARM::ADDrr; break; 7438 } 7439 // If the shift is by zero, use the non-shifted instruction definition. 7440 // The exception is for right shifts, where 0 == 32 7441 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 && 7442 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) { 7443 MCInst TmpInst; 7444 TmpInst.setOpcode(newOpc); 7445 TmpInst.addOperand(Inst.getOperand(0)); 7446 TmpInst.addOperand(Inst.getOperand(1)); 7447 TmpInst.addOperand(Inst.getOperand(2)); 7448 TmpInst.addOperand(Inst.getOperand(4)); 7449 TmpInst.addOperand(Inst.getOperand(5)); 7450 TmpInst.addOperand(Inst.getOperand(6)); 7451 Inst = TmpInst; 7452 return true; 7453 } 7454 return false; 7455 } 7456 case ARM::ITasm: 7457 case ARM::t2IT: { 7458 // The mask bits for all but the first condition are represented as 7459 // the low bit of the condition code value implies 't'. We currently 7460 // always have 1 implies 't', so XOR toggle the bits if the low bit 7461 // of the condition code is zero. 7462 MCOperand &MO = Inst.getOperand(1); 7463 unsigned Mask = MO.getImm(); 7464 unsigned OrigMask = Mask; 7465 unsigned TZ = countTrailingZeros(Mask); 7466 if ((Inst.getOperand(0).getImm() & 1) == 0) { 7467 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 7468 Mask ^= (0xE << TZ) & 0xF; 7469 } 7470 MO.setImm(Mask); 7471 7472 // Set up the IT block state according to the IT instruction we just 7473 // matched. 7474 assert(!inITBlock() && "nested IT blocks?!"); 7475 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 7476 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 
7477 ITState.CurPosition = 0; 7478 ITState.FirstCond = true; 7479 break; 7480 } 7481 case ARM::t2LSLrr: 7482 case ARM::t2LSRrr: 7483 case ARM::t2ASRrr: 7484 case ARM::t2SBCrr: 7485 case ARM::t2RORrr: 7486 case ARM::t2BICrr: 7487 { 7488 // Assemblers should use the narrow encodings of these instructions when permissible. 7489 if ((isARMLowRegister(Inst.getOperand(1).getReg()) && 7490 isARMLowRegister(Inst.getOperand(2).getReg())) && 7491 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 7492 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || 7493 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && 7494 (!static_cast<ARMOperand*>(Operands[3])->isToken() || 7495 !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) { 7496 unsigned NewOpc; 7497 switch (Inst.getOpcode()) { 7498 default: llvm_unreachable("unexpected opcode"); 7499 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break; 7500 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break; 7501 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break; 7502 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break; 7503 case ARM::t2RORrr: NewOpc = ARM::tROR; break; 7504 case ARM::t2BICrr: NewOpc = ARM::tBIC; break; 7505 } 7506 MCInst TmpInst; 7507 TmpInst.setOpcode(NewOpc); 7508 TmpInst.addOperand(Inst.getOperand(0)); 7509 TmpInst.addOperand(Inst.getOperand(5)); 7510 TmpInst.addOperand(Inst.getOperand(1)); 7511 TmpInst.addOperand(Inst.getOperand(2)); 7512 TmpInst.addOperand(Inst.getOperand(3)); 7513 TmpInst.addOperand(Inst.getOperand(4)); 7514 Inst = TmpInst; 7515 return true; 7516 } 7517 return false; 7518 } 7519 case ARM::t2ANDrr: 7520 case ARM::t2EORrr: 7521 case ARM::t2ADCrr: 7522 case ARM::t2ORRrr: 7523 { 7524 // Assemblers should use the narrow encodings of these instructions when permissible. 7525 // These instructions are special in that they are commutable, so shorter encodings 7526 // are available more often. 
7527 if ((isARMLowRegister(Inst.getOperand(1).getReg()) && 7528 isARMLowRegister(Inst.getOperand(2).getReg())) && 7529 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() || 7530 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) && 7531 ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) || 7532 (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) && 7533 (!static_cast<ARMOperand*>(Operands[3])->isToken() || 7534 !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) { 7535 unsigned NewOpc; 7536 switch (Inst.getOpcode()) { 7537 default: llvm_unreachable("unexpected opcode"); 7538 case ARM::t2ADCrr: NewOpc = ARM::tADC; break; 7539 case ARM::t2ANDrr: NewOpc = ARM::tAND; break; 7540 case ARM::t2EORrr: NewOpc = ARM::tEOR; break; 7541 case ARM::t2ORRrr: NewOpc = ARM::tORR; break; 7542 } 7543 MCInst TmpInst; 7544 TmpInst.setOpcode(NewOpc); 7545 TmpInst.addOperand(Inst.getOperand(0)); 7546 TmpInst.addOperand(Inst.getOperand(5)); 7547 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) { 7548 TmpInst.addOperand(Inst.getOperand(1)); 7549 TmpInst.addOperand(Inst.getOperand(2)); 7550 } else { 7551 TmpInst.addOperand(Inst.getOperand(2)); 7552 TmpInst.addOperand(Inst.getOperand(1)); 7553 } 7554 TmpInst.addOperand(Inst.getOperand(3)); 7555 TmpInst.addOperand(Inst.getOperand(4)); 7556 Inst = TmpInst; 7557 return true; 7558 } 7559 return false; 7560 } 7561 } 7562 return false; 7563 } 7564 7565 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 7566 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 7567 // suffix depending on whether they're in an IT block or not. 
7568 unsigned Opc = Inst.getOpcode(); 7569 const MCInstrDesc &MCID = MII.get(Opc); 7570 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 7571 assert(MCID.hasOptionalDef() && 7572 "optionally flag setting instruction missing optional def operand"); 7573 assert(MCID.NumOperands == Inst.getNumOperands() && 7574 "operand count mismatch!"); 7575 // Find the optional-def operand (cc_out). 7576 unsigned OpNo; 7577 for (OpNo = 0; 7578 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 7579 ++OpNo) 7580 ; 7581 // If we're parsing Thumb1, reject it completely. 7582 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 7583 return Match_MnemonicFail; 7584 // If we're parsing Thumb2, which form is legal depends on whether we're 7585 // in an IT block. 7586 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 7587 !inITBlock()) 7588 return Match_RequiresITBlock; 7589 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 7590 inITBlock()) 7591 return Match_RequiresNotITBlock; 7592 } 7593 // Some high-register supporting Thumb1 encodings only allow both registers 7594 // to be from r0-r7 when in Thumb2. 7595 else if (Opc == ARM::tADDhirr && isThumbOne() && 7596 isARMLowRegister(Inst.getOperand(1).getReg()) && 7597 isARMLowRegister(Inst.getOperand(2).getReg())) 7598 return Match_RequiresThumb2; 7599 // Others only require ARMv6 or later. 
7600 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 7601 isARMLowRegister(Inst.getOperand(0).getReg()) && 7602 isARMLowRegister(Inst.getOperand(1).getReg())) 7603 return Match_RequiresV6; 7604 return Match_Success; 7605 } 7606 7607 static const char *getSubtargetFeatureName(unsigned Val); 7608 bool ARMAsmParser:: 7609 MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, 7610 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 7611 MCStreamer &Out, unsigned &ErrorInfo, 7612 bool MatchingInlineAsm) { 7613 MCInst Inst; 7614 unsigned MatchResult; 7615 7616 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo, 7617 MatchingInlineAsm); 7618 switch (MatchResult) { 7619 default: break; 7620 case Match_Success: 7621 // Context sensitive operand constraints aren't handled by the matcher, 7622 // so check them here. 7623 if (validateInstruction(Inst, Operands)) { 7624 // Still progress the IT block, otherwise one wrong condition causes 7625 // nasty cascading errors. 7626 forwardITPosition(); 7627 return true; 7628 } 7629 7630 { // processInstruction() updates inITBlock state, we need to save it away 7631 bool wasInITBlock = inITBlock(); 7632 7633 // Some instructions need post-processing to, for example, tweak which 7634 // encoding is selected. Loop on it while changes happen so the 7635 // individual transformations can chain off each other. E.g., 7636 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 7637 while (processInstruction(Inst, Operands)) 7638 ; 7639 7640 // Only after the instruction is fully processed, we can validate it 7641 if (wasInITBlock && hasV8Ops() && isThumb() && 7642 !isV8EligibleForIT(&Inst, 2)) { 7643 Warning(IDLoc, "deprecated instruction in IT block"); 7644 } 7645 } 7646 7647 // Only move forward at the very end so that everything in validate 7648 // and process gets a consistent answer about whether we're in an IT 7649 // block. 
7650 forwardITPosition(); 7651 7652 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and 7653 // doesn't actually encode. 7654 if (Inst.getOpcode() == ARM::ITasm) 7655 return false; 7656 7657 Inst.setLoc(IDLoc); 7658 Out.EmitInstruction(Inst); 7659 return false; 7660 case Match_MissingFeature: { 7661 assert(ErrorInfo && "Unknown missing feature!"); 7662 // Special case the error message for the very common case where only 7663 // a single subtarget feature is missing (Thumb vs. ARM, e.g.). 7664 std::string Msg = "instruction requires:"; 7665 unsigned Mask = 1; 7666 for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) { 7667 if (ErrorInfo & Mask) { 7668 Msg += " "; 7669 Msg += getSubtargetFeatureName(ErrorInfo & Mask); 7670 } 7671 Mask <<= 1; 7672 } 7673 return Error(IDLoc, Msg); 7674 } 7675 case Match_InvalidOperand: { 7676 SMLoc ErrorLoc = IDLoc; 7677 if (ErrorInfo != ~0U) { 7678 if (ErrorInfo >= Operands.size()) 7679 return Error(IDLoc, "too few operands for instruction"); 7680 7681 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 7682 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 7683 } 7684 7685 return Error(ErrorLoc, "invalid operand for instruction"); 7686 } 7687 case Match_MnemonicFail: 7688 return Error(IDLoc, "invalid instruction", 7689 ((ARMOperand*)Operands[0])->getLocRange()); 7690 case Match_RequiresNotITBlock: 7691 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 7692 case Match_RequiresITBlock: 7693 return Error(IDLoc, "instruction only valid inside IT block"); 7694 case Match_RequiresV6: 7695 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 7696 case Match_RequiresThumb2: 7697 return Error(IDLoc, "instruction variant requires Thumb2"); 7698 case Match_ImmRange0_15: { 7699 SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 7700 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 7701 return Error(ErrorLoc, "immediate operand must be in the range [0,15]"); 7702 } 
7703 } 7704 7705 llvm_unreachable("Implement any new match types added!"); 7706 } 7707 7708 /// parseDirective parses the arm specific directives 7709 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 7710 StringRef IDVal = DirectiveID.getIdentifier(); 7711 if (IDVal == ".word") 7712 return parseDirectiveWord(4, DirectiveID.getLoc()); 7713 else if (IDVal == ".thumb") 7714 return parseDirectiveThumb(DirectiveID.getLoc()); 7715 else if (IDVal == ".arm") 7716 return parseDirectiveARM(DirectiveID.getLoc()); 7717 else if (IDVal == ".thumb_func") 7718 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 7719 else if (IDVal == ".code") 7720 return parseDirectiveCode(DirectiveID.getLoc()); 7721 else if (IDVal == ".syntax") 7722 return parseDirectiveSyntax(DirectiveID.getLoc()); 7723 else if (IDVal == ".unreq") 7724 return parseDirectiveUnreq(DirectiveID.getLoc()); 7725 else if (IDVal == ".arch") 7726 return parseDirectiveArch(DirectiveID.getLoc()); 7727 else if (IDVal == ".eabi_attribute") 7728 return parseDirectiveEabiAttr(DirectiveID.getLoc()); 7729 else if (IDVal == ".fnstart") 7730 return parseDirectiveFnStart(DirectiveID.getLoc()); 7731 else if (IDVal == ".fnend") 7732 return parseDirectiveFnEnd(DirectiveID.getLoc()); 7733 else if (IDVal == ".cantunwind") 7734 return parseDirectiveCantUnwind(DirectiveID.getLoc()); 7735 else if (IDVal == ".personality") 7736 return parseDirectivePersonality(DirectiveID.getLoc()); 7737 else if (IDVal == ".handlerdata") 7738 return parseDirectiveHandlerData(DirectiveID.getLoc()); 7739 else if (IDVal == ".setfp") 7740 return parseDirectiveSetFP(DirectiveID.getLoc()); 7741 else if (IDVal == ".pad") 7742 return parseDirectivePad(DirectiveID.getLoc()); 7743 else if (IDVal == ".save") 7744 return parseDirectiveRegSave(DirectiveID.getLoc(), false); 7745 else if (IDVal == ".vsave") 7746 return parseDirectiveRegSave(DirectiveID.getLoc(), true); 7747 return true; 7748 } 7749 7750 /// parseDirectiveWord 7751 /// ::= .word [ expression (, 
expression)* ] 7752 bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 7753 if (getLexer().isNot(AsmToken::EndOfStatement)) { 7754 for (;;) { 7755 const MCExpr *Value; 7756 if (getParser().parseExpression(Value)) 7757 return true; 7758 7759 getParser().getStreamer().EmitValue(Value, Size); 7760 7761 if (getLexer().is(AsmToken::EndOfStatement)) 7762 break; 7763 7764 // FIXME: Improve diagnostic. 7765 if (getLexer().isNot(AsmToken::Comma)) 7766 return Error(L, "unexpected token in directive"); 7767 Parser.Lex(); 7768 } 7769 } 7770 7771 Parser.Lex(); 7772 return false; 7773 } 7774 7775 /// parseDirectiveThumb 7776 /// ::= .thumb 7777 bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 7778 if (getLexer().isNot(AsmToken::EndOfStatement)) 7779 return Error(L, "unexpected token in directive"); 7780 Parser.Lex(); 7781 7782 if (!hasThumb()) 7783 return Error(L, "target does not support Thumb mode"); 7784 7785 if (!isThumb()) 7786 SwitchMode(); 7787 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 7788 return false; 7789 } 7790 7791 /// parseDirectiveARM 7792 /// ::= .arm 7793 bool ARMAsmParser::parseDirectiveARM(SMLoc L) { 7794 if (getLexer().isNot(AsmToken::EndOfStatement)) 7795 return Error(L, "unexpected token in directive"); 7796 Parser.Lex(); 7797 7798 if (!hasARM()) 7799 return Error(L, "target does not support ARM mode"); 7800 7801 if (isThumb()) 7802 SwitchMode(); 7803 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 7804 return false; 7805 } 7806 7807 /// parseDirectiveThumbFunc 7808 /// ::= .thumbfunc symbol_name 7809 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 7810 const MCAsmInfo *MAI = getParser().getStreamer().getContext().getAsmInfo(); 7811 bool isMachO = MAI->hasSubsectionsViaSymbols(); 7812 StringRef Name; 7813 bool needFuncName = true; 7814 7815 // Darwin asm has (optionally) function name after .thumb_func direction 7816 // ELF doesn't 7817 if (isMachO) { 7818 const AsmToken &Tok = Parser.getTok(); 7819 if 
(Tok.isNot(AsmToken::EndOfStatement)) { 7820 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 7821 return Error(L, "unexpected token in .thumb_func directive"); 7822 Name = Tok.getIdentifier(); 7823 Parser.Lex(); // Consume the identifier token. 7824 needFuncName = false; 7825 } 7826 } 7827 7828 if (getLexer().isNot(AsmToken::EndOfStatement)) 7829 return Error(L, "unexpected token in directive"); 7830 7831 // Eat the end of statement and any blank lines that follow. 7832 while (getLexer().is(AsmToken::EndOfStatement)) 7833 Parser.Lex(); 7834 7835 // FIXME: assuming function name will be the line following .thumb_func 7836 // We really should be checking the next symbol definition even if there's 7837 // stuff in between. 7838 if (needFuncName) { 7839 Name = Parser.getTok().getIdentifier(); 7840 } 7841 7842 // Mark symbol as a thumb symbol. 7843 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 7844 getParser().getStreamer().EmitThumbFunc(Func); 7845 return false; 7846 } 7847 7848 /// parseDirectiveSyntax 7849 /// ::= .syntax unified | divided 7850 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 7851 const AsmToken &Tok = Parser.getTok(); 7852 if (Tok.isNot(AsmToken::Identifier)) 7853 return Error(L, "unexpected token in .syntax directive"); 7854 StringRef Mode = Tok.getString(); 7855 if (Mode == "unified" || Mode == "UNIFIED") 7856 Parser.Lex(); 7857 else if (Mode == "divided" || Mode == "DIVIDED") 7858 return Error(L, "'.syntax divided' arm asssembly not supported"); 7859 else 7860 return Error(L, "unrecognized syntax mode in .syntax directive"); 7861 7862 if (getLexer().isNot(AsmToken::EndOfStatement)) 7863 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 7864 Parser.Lex(); 7865 7866 // TODO tell the MC streamer the mode 7867 // getParser().getStreamer().Emit???(); 7868 return false; 7869 } 7870 7871 /// parseDirectiveCode 7872 /// ::= .code 16 | 32 7873 bool ARMAsmParser::parseDirectiveCode(SMLoc L) 
{ 7874 const AsmToken &Tok = Parser.getTok(); 7875 if (Tok.isNot(AsmToken::Integer)) 7876 return Error(L, "unexpected token in .code directive"); 7877 int64_t Val = Parser.getTok().getIntVal(); 7878 if (Val == 16) 7879 Parser.Lex(); 7880 else if (Val == 32) 7881 Parser.Lex(); 7882 else 7883 return Error(L, "invalid operand to .code directive"); 7884 7885 if (getLexer().isNot(AsmToken::EndOfStatement)) 7886 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 7887 Parser.Lex(); 7888 7889 if (Val == 16) { 7890 if (!hasThumb()) 7891 return Error(L, "target does not support Thumb mode"); 7892 7893 if (!isThumb()) 7894 SwitchMode(); 7895 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 7896 } else { 7897 if (!hasARM()) 7898 return Error(L, "target does not support ARM mode"); 7899 7900 if (isThumb()) 7901 SwitchMode(); 7902 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 7903 } 7904 7905 return false; 7906 } 7907 7908 /// parseDirectiveReq 7909 /// ::= name .req registername 7910 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { 7911 Parser.Lex(); // Eat the '.req' token. 7912 unsigned Reg; 7913 SMLoc SRegLoc, ERegLoc; 7914 if (ParseRegister(Reg, SRegLoc, ERegLoc)) { 7915 Parser.eatToEndOfStatement(); 7916 return Error(SRegLoc, "register name expected"); 7917 } 7918 7919 // Shouldn't be anything else. 
7920 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) { 7921 Parser.eatToEndOfStatement(); 7922 return Error(Parser.getTok().getLoc(), 7923 "unexpected input in .req directive."); 7924 } 7925 7926 Parser.Lex(); // Consume the EndOfStatement 7927 7928 if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg) 7929 return Error(SRegLoc, "redefinition of '" + Name + 7930 "' does not match original."); 7931 7932 return false; 7933 } 7934 7935 /// parseDirectiveUneq 7936 /// ::= .unreq registername 7937 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { 7938 if (Parser.getTok().isNot(AsmToken::Identifier)) { 7939 Parser.eatToEndOfStatement(); 7940 return Error(L, "unexpected input in .unreq directive."); 7941 } 7942 RegisterReqs.erase(Parser.getTok().getIdentifier()); 7943 Parser.Lex(); // Eat the identifier. 7944 return false; 7945 } 7946 7947 /// parseDirectiveArch 7948 /// ::= .arch token 7949 bool ARMAsmParser::parseDirectiveArch(SMLoc L) { 7950 return true; 7951 } 7952 7953 /// parseDirectiveEabiAttr 7954 /// ::= .eabi_attribute int, int 7955 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) { 7956 return true; 7957 } 7958 7959 /// parseDirectiveFnStart 7960 /// ::= .fnstart 7961 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) { 7962 if (FnStartLoc.isValid()) { 7963 Error(L, ".fnstart starts before the end of previous one"); 7964 Error(FnStartLoc, "previous .fnstart starts here"); 7965 return true; 7966 } 7967 7968 FnStartLoc = L; 7969 getTargetStreamer().emitFnStart(); 7970 return false; 7971 } 7972 7973 /// parseDirectiveFnEnd 7974 /// ::= .fnend 7975 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) { 7976 // Check the ordering of unwind directives 7977 if (!FnStartLoc.isValid()) 7978 return Error(L, ".fnstart must precede .fnend directive"); 7979 7980 // Reset the unwind directives parser state 7981 resetUnwindDirectiveParserState(); 7982 getTargetStreamer().emitFnEnd(); 7983 return false; 7984 } 7985 7986 /// parseDirectiveCantUnwind 7987 /// ::= 
.cantunwind 7988 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) { 7989 // Check the ordering of unwind directives 7990 CantUnwindLoc = L; 7991 if (!FnStartLoc.isValid()) 7992 return Error(L, ".fnstart must precede .cantunwind directive"); 7993 if (HandlerDataLoc.isValid()) { 7994 Error(L, ".cantunwind can't be used with .handlerdata directive"); 7995 Error(HandlerDataLoc, ".handlerdata was specified here"); 7996 return true; 7997 } 7998 if (PersonalityLoc.isValid()) { 7999 Error(L, ".cantunwind can't be used with .personality directive"); 8000 Error(PersonalityLoc, ".personality was specified here"); 8001 return true; 8002 } 8003 8004 getTargetStreamer().emitCantUnwind(); 8005 return false; 8006 } 8007 8008 /// parseDirectivePersonality 8009 /// ::= .personality name 8010 bool ARMAsmParser::parseDirectivePersonality(SMLoc L) { 8011 // Check the ordering of unwind directives 8012 PersonalityLoc = L; 8013 if (!FnStartLoc.isValid()) 8014 return Error(L, ".fnstart must precede .personality directive"); 8015 if (CantUnwindLoc.isValid()) { 8016 Error(L, ".personality can't be used with .cantunwind directive"); 8017 Error(CantUnwindLoc, ".cantunwind was specified here"); 8018 return true; 8019 } 8020 if (HandlerDataLoc.isValid()) { 8021 Error(L, ".personality must precede .handlerdata directive"); 8022 Error(HandlerDataLoc, ".handlerdata was specified here"); 8023 return true; 8024 } 8025 8026 // Parse the name of the personality routine 8027 if (Parser.getTok().isNot(AsmToken::Identifier)) { 8028 Parser.eatToEndOfStatement(); 8029 return Error(L, "unexpected input in .personality directive."); 8030 } 8031 StringRef Name(Parser.getTok().getIdentifier()); 8032 Parser.Lex(); 8033 8034 MCSymbol *PR = getParser().getContext().GetOrCreateSymbol(Name); 8035 getTargetStreamer().emitPersonality(PR); 8036 return false; 8037 } 8038 8039 /// parseDirectiveHandlerData 8040 /// ::= .handlerdata 8041 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) { 8042 // Check the 
ordering of unwind directives 8043 HandlerDataLoc = L; 8044 if (!FnStartLoc.isValid()) 8045 return Error(L, ".fnstart must precede .personality directive"); 8046 if (CantUnwindLoc.isValid()) { 8047 Error(L, ".handlerdata can't be used with .cantunwind directive"); 8048 Error(CantUnwindLoc, ".cantunwind was specified here"); 8049 return true; 8050 } 8051 8052 getTargetStreamer().emitHandlerData(); 8053 return false; 8054 } 8055 8056 /// parseDirectiveSetFP 8057 /// ::= .setfp fpreg, spreg [, offset] 8058 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) { 8059 // Check the ordering of unwind directives 8060 if (!FnStartLoc.isValid()) 8061 return Error(L, ".fnstart must precede .setfp directive"); 8062 if (HandlerDataLoc.isValid()) 8063 return Error(L, ".setfp must precede .handlerdata directive"); 8064 8065 // Parse fpreg 8066 SMLoc NewFPRegLoc = Parser.getTok().getLoc(); 8067 int NewFPReg = tryParseRegister(); 8068 if (NewFPReg == -1) 8069 return Error(NewFPRegLoc, "frame pointer register expected"); 8070 8071 // Consume comma 8072 if (!Parser.getTok().is(AsmToken::Comma)) 8073 return Error(Parser.getTok().getLoc(), "comma expected"); 8074 Parser.Lex(); // skip comma 8075 8076 // Parse spreg 8077 SMLoc NewSPRegLoc = Parser.getTok().getLoc(); 8078 int NewSPReg = tryParseRegister(); 8079 if (NewSPReg == -1) 8080 return Error(NewSPRegLoc, "stack pointer register expected"); 8081 8082 if (NewSPReg != ARM::SP && NewSPReg != FPReg) 8083 return Error(NewSPRegLoc, 8084 "register should be either $sp or the latest fp register"); 8085 8086 // Update the frame pointer register 8087 FPReg = NewFPReg; 8088 8089 // Parse offset 8090 int64_t Offset = 0; 8091 if (Parser.getTok().is(AsmToken::Comma)) { 8092 Parser.Lex(); // skip comma 8093 8094 if (Parser.getTok().isNot(AsmToken::Hash) && 8095 Parser.getTok().isNot(AsmToken::Dollar)) { 8096 return Error(Parser.getTok().getLoc(), "'#' expected"); 8097 } 8098 Parser.Lex(); // skip hash token. 
8099 8100 const MCExpr *OffsetExpr; 8101 SMLoc ExLoc = Parser.getTok().getLoc(); 8102 SMLoc EndLoc; 8103 if (getParser().parseExpression(OffsetExpr, EndLoc)) 8104 return Error(ExLoc, "malformed setfp offset"); 8105 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 8106 if (!CE) 8107 return Error(ExLoc, "setfp offset must be an immediate"); 8108 8109 Offset = CE->getValue(); 8110 } 8111 8112 getTargetStreamer().emitSetFP(static_cast<unsigned>(NewFPReg), 8113 static_cast<unsigned>(NewSPReg), Offset); 8114 return false; 8115 } 8116 8117 /// parseDirective 8118 /// ::= .pad offset 8119 bool ARMAsmParser::parseDirectivePad(SMLoc L) { 8120 // Check the ordering of unwind directives 8121 if (!FnStartLoc.isValid()) 8122 return Error(L, ".fnstart must precede .pad directive"); 8123 if (HandlerDataLoc.isValid()) 8124 return Error(L, ".pad must precede .handlerdata directive"); 8125 8126 // Parse the offset 8127 if (Parser.getTok().isNot(AsmToken::Hash) && 8128 Parser.getTok().isNot(AsmToken::Dollar)) { 8129 return Error(Parser.getTok().getLoc(), "'#' expected"); 8130 } 8131 Parser.Lex(); // skip hash token. 
8132 8133 const MCExpr *OffsetExpr; 8134 SMLoc ExLoc = Parser.getTok().getLoc(); 8135 SMLoc EndLoc; 8136 if (getParser().parseExpression(OffsetExpr, EndLoc)) 8137 return Error(ExLoc, "malformed pad offset"); 8138 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 8139 if (!CE) 8140 return Error(ExLoc, "pad offset must be an immediate"); 8141 8142 getTargetStreamer().emitPad(CE->getValue()); 8143 return false; 8144 } 8145 8146 /// parseDirectiveRegSave 8147 /// ::= .save { registers } 8148 /// ::= .vsave { registers } 8149 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) { 8150 // Check the ordering of unwind directives 8151 if (!FnStartLoc.isValid()) 8152 return Error(L, ".fnstart must precede .save or .vsave directives"); 8153 if (HandlerDataLoc.isValid()) 8154 return Error(L, ".save or .vsave must precede .handlerdata directive"); 8155 8156 // RAII object to make sure parsed operands are deleted. 8157 struct CleanupObject { 8158 SmallVector<MCParsedAsmOperand *, 1> Operands; 8159 ~CleanupObject() { 8160 for (unsigned I = 0, E = Operands.size(); I != E; ++I) 8161 delete Operands[I]; 8162 } 8163 } CO; 8164 8165 // Parse the register list 8166 if (parseRegisterList(CO.Operands)) 8167 return true; 8168 ARMOperand *Op = (ARMOperand*)CO.Operands[0]; 8169 if (!IsVector && !Op->isRegList()) 8170 return Error(L, ".save expects GPR registers"); 8171 if (IsVector && !Op->isDPRRegList()) 8172 return Error(L, ".vsave expects DPR registers"); 8173 8174 getTargetStreamer().emitRegSave(Op->getRegList(), IsVector); 8175 return false; 8176 } 8177 8178 /// Force static initialization. 
8179 extern "C" void LLVMInitializeARMAsmParser() { 8180 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 8181 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 8182 } 8183 8184 #define GET_REGISTER_MATCHER 8185 #define GET_SUBTARGET_FEATURE_NAME 8186 #define GET_MATCHER_IMPLEMENTATION 8187 #include "ARMGenAsmMatcher.inc" 8188 8189 // Define this matcher function after the auto-generated include so we 8190 // have the match class enum definitions. 8191 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp, 8192 unsigned Kind) { 8193 ARMOperand *Op = static_cast<ARMOperand*>(AsmOp); 8194 // If the kind is a token for a literal immediate, check if our asm 8195 // operand matches. This is for InstAliases which have a fixed-value 8196 // immediate in the syntax. 8197 if (Kind == MCK__35_0 && Op->isImm()) { 8198 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 8199 if (!CE) 8200 return Match_InvalidOperand; 8201 if (CE->getValue() == 0) 8202 return Match_Success; 8203 } 8204 return Match_InvalidOperand; 8205 } 8206