//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

// Kind of lane specification parsed after a vector register:
// none ("d0"), all lanes ("d0[]"), or a single indexed lane ("d0[1]").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

/// ARMAsmParser - Target asm parser for ARM/Thumb. Parses instructions and
/// target-specific directives into MCInsts via the MCTargetAsmParser
/// interface, tracking IT-block state across instructions as it goes.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;
  const MCRegisterInfo *MRI;

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // State for the IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while instructions are being parsed inside an active IT block.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Target-specific directive handlers (.word, .thumb, .arm, .code, .req, ...).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute the feature set.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked from the tablegen'd matcher above.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Cache the MCRegisterInfo.
    MRI = &getContext().getRegisterInfo();

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

  // Per-kind payload; which member is valid is determined by Kind.
  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
      bool isDoubleSpaced;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;      // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;          // shift for OffsetReg.
      unsigned Alignment;         // 0 = no alignment specified
                                  // n = alignment in bytes (2, 4, 8, 16, or 32)
      unsigned isNegative : 1;    // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor: only the union member selected by Kind is copied;
  // register lists live outside the union and are copied as a vector.
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors; each asserts that this operand holds the
  // corresponding union member before reading it.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return
Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  // The is*Imm* predicates below are operand-class checks, presumably
  // consumed by the tablegen'd matcher (ARMGenAsmMatcher.inc) — verify.
  // Unless noted otherwise, each requires a constant expression and tests
  // the value range named by the predicate.
  bool isFPImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    return Val != -1;
  }
  bool isFBits16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 16;
  }
  bool isFBits32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }
  bool isImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Multiple of 4 in [-1020, 1020].
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_508s4Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    // explicitly exclude zero. we want that to use the normal 0_508 version.
    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_4095() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4096;
  }
  bool isImm0_4095Neg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = -CE->getValue();
    return Value > 0 && Value < 4096;
  }
  bool isImm0_1() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 2;
  }
  bool isImm0_3() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4;
  }
  bool isImm0_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm0_63() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 64;
  }
  bool isImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 8;
  }
  bool isImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 16;
  }
  bool isImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  bool isShrImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
  bool isImm1_7() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 8;
  }
  bool isImm1_15() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 16;
  }
  bool isImm1_31() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_32() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isARMSOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isARMSOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
  bool isARMSOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getSOImmVal(Value) == -1 &&
      ARM_AM::getSOImmVal(-Value) != -1;
  }
  bool isT2SOImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isT2SOImmNot() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(~Value) != -1;
  }
  bool isT2SOImmNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // Only use this when not representable as a plain so_imm.
    return ARM_AM::getT2SOImmVal(Value) == -1 &&
      ARM_AM::getT2SOImmVal(-Value) != -1;
  }
  bool isSetEndImm() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  // Memory operand-class predicates. Several of the signed-offset checks
  // below accept INT32_MIN as a special encoding of "#-0" (negative zero
  // offset), matching the comment in isAM3Offset.
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isMemPCRelImm12() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base register must be PC.
    if (Memory.BaseRegNum != ARM::PC)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
1065 if (!Memory.OffsetImm) return true; 1066 int64_t Val = Memory.OffsetImm->getValue(); 1067 return (Val >= 0 && Val < 4096); 1068 } 1069 bool isMemImm12Offset() const { 1070 // If we have an immediate that's not a constant, treat it as a label 1071 // reference needing a fixup. If it is a constant, it's something else 1072 // and we reject it. 1073 if (isImm() && !isa<MCConstantExpr>(getImm())) 1074 return true; 1075 1076 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 1077 return false; 1078 // Immediate offset in range [-4095, 4095]. 1079 if (!Memory.OffsetImm) return true; 1080 int64_t Val = Memory.OffsetImm->getValue(); 1081 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 1082 } 1083 bool isPostIdxImm8() const { 1084 if (!isImm()) return false; 1085 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1086 if (!CE) return false; 1087 int64_t Val = CE->getValue(); 1088 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 1089 } 1090 bool isPostIdxImm8s4() const { 1091 if (!isImm()) return false; 1092 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1093 if (!CE) return false; 1094 int64_t Val = CE->getValue(); 1095 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 1096 (Val == INT32_MIN); 1097 } 1098 1099 bool isMSRMask() const { return Kind == k_MSRMask; } 1100 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 1101 1102 // NEON operands. 
1103 bool isSingleSpacedVectorList() const { 1104 return Kind == k_VectorList && !VectorList.isDoubleSpaced; 1105 } 1106 bool isDoubleSpacedVectorList() const { 1107 return Kind == k_VectorList && VectorList.isDoubleSpaced; 1108 } 1109 bool isVecListOneD() const { 1110 if (!isSingleSpacedVectorList()) return false; 1111 return VectorList.Count == 1; 1112 } 1113 1114 bool isVecListDPair() const { 1115 if (!isSingleSpacedVectorList()) return false; 1116 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 1117 .contains(VectorList.RegNum)); 1118 } 1119 1120 bool isVecListThreeD() const { 1121 if (!isSingleSpacedVectorList()) return false; 1122 return VectorList.Count == 3; 1123 } 1124 1125 bool isVecListFourD() const { 1126 if (!isSingleSpacedVectorList()) return false; 1127 return VectorList.Count == 4; 1128 } 1129 1130 bool isVecListDPairSpaced() const { 1131 if (isSingleSpacedVectorList()) return false; 1132 return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID] 1133 .contains(VectorList.RegNum)); 1134 } 1135 1136 bool isVecListThreeQ() const { 1137 if (!isDoubleSpacedVectorList()) return false; 1138 return VectorList.Count == 3; 1139 } 1140 1141 bool isVecListFourQ() const { 1142 if (!isDoubleSpacedVectorList()) return false; 1143 return VectorList.Count == 4; 1144 } 1145 1146 bool isSingleSpacedVectorAllLanes() const { 1147 return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced; 1148 } 1149 bool isDoubleSpacedVectorAllLanes() const { 1150 return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced; 1151 } 1152 bool isVecListOneDAllLanes() const { 1153 if (!isSingleSpacedVectorAllLanes()) return false; 1154 return VectorList.Count == 1; 1155 } 1156 1157 bool isVecListDPairAllLanes() const { 1158 if (!isSingleSpacedVectorAllLanes()) return false; 1159 return (ARMMCRegisterClasses[ARM::DPairRegClassID] 1160 .contains(VectorList.RegNum)); 1161 } 1162 1163 bool isVecListDPairSpacedAllLanes() const { 1164 if (!isDoubleSpacedVectorAllLanes()) return false; 
1165 return VectorList.Count == 2; 1166 } 1167 1168 bool isVecListThreeDAllLanes() const { 1169 if (!isSingleSpacedVectorAllLanes()) return false; 1170 return VectorList.Count == 3; 1171 } 1172 1173 bool isVecListThreeQAllLanes() const { 1174 if (!isDoubleSpacedVectorAllLanes()) return false; 1175 return VectorList.Count == 3; 1176 } 1177 1178 bool isVecListFourDAllLanes() const { 1179 if (!isSingleSpacedVectorAllLanes()) return false; 1180 return VectorList.Count == 4; 1181 } 1182 1183 bool isVecListFourQAllLanes() const { 1184 if (!isDoubleSpacedVectorAllLanes()) return false; 1185 return VectorList.Count == 4; 1186 } 1187 1188 bool isSingleSpacedVectorIndexed() const { 1189 return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced; 1190 } 1191 bool isDoubleSpacedVectorIndexed() const { 1192 return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced; 1193 } 1194 bool isVecListOneDByteIndexed() const { 1195 if (!isSingleSpacedVectorIndexed()) return false; 1196 return VectorList.Count == 1 && VectorList.LaneIndex <= 7; 1197 } 1198 1199 bool isVecListOneDHWordIndexed() const { 1200 if (!isSingleSpacedVectorIndexed()) return false; 1201 return VectorList.Count == 1 && VectorList.LaneIndex <= 3; 1202 } 1203 1204 bool isVecListOneDWordIndexed() const { 1205 if (!isSingleSpacedVectorIndexed()) return false; 1206 return VectorList.Count == 1 && VectorList.LaneIndex <= 1; 1207 } 1208 1209 bool isVecListTwoDByteIndexed() const { 1210 if (!isSingleSpacedVectorIndexed()) return false; 1211 return VectorList.Count == 2 && VectorList.LaneIndex <= 7; 1212 } 1213 1214 bool isVecListTwoDHWordIndexed() const { 1215 if (!isSingleSpacedVectorIndexed()) return false; 1216 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 1217 } 1218 1219 bool isVecListTwoQWordIndexed() const { 1220 if (!isDoubleSpacedVectorIndexed()) return false; 1221 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 1222 } 1223 1224 bool isVecListTwoQHWordIndexed() const { 1225 if 
(!isDoubleSpacedVectorIndexed()) return false; 1226 return VectorList.Count == 2 && VectorList.LaneIndex <= 3; 1227 } 1228 1229 bool isVecListTwoDWordIndexed() const { 1230 if (!isSingleSpacedVectorIndexed()) return false; 1231 return VectorList.Count == 2 && VectorList.LaneIndex <= 1; 1232 } 1233 1234 bool isVecListThreeDByteIndexed() const { 1235 if (!isSingleSpacedVectorIndexed()) return false; 1236 return VectorList.Count == 3 && VectorList.LaneIndex <= 7; 1237 } 1238 1239 bool isVecListThreeDHWordIndexed() const { 1240 if (!isSingleSpacedVectorIndexed()) return false; 1241 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 1242 } 1243 1244 bool isVecListThreeQWordIndexed() const { 1245 if (!isDoubleSpacedVectorIndexed()) return false; 1246 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 1247 } 1248 1249 bool isVecListThreeQHWordIndexed() const { 1250 if (!isDoubleSpacedVectorIndexed()) return false; 1251 return VectorList.Count == 3 && VectorList.LaneIndex <= 3; 1252 } 1253 1254 bool isVecListThreeDWordIndexed() const { 1255 if (!isSingleSpacedVectorIndexed()) return false; 1256 return VectorList.Count == 3 && VectorList.LaneIndex <= 1; 1257 } 1258 1259 bool isVecListFourDByteIndexed() const { 1260 if (!isSingleSpacedVectorIndexed()) return false; 1261 return VectorList.Count == 4 && VectorList.LaneIndex <= 7; 1262 } 1263 1264 bool isVecListFourDHWordIndexed() const { 1265 if (!isSingleSpacedVectorIndexed()) return false; 1266 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 1267 } 1268 1269 bool isVecListFourQWordIndexed() const { 1270 if (!isDoubleSpacedVectorIndexed()) return false; 1271 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 1272 } 1273 1274 bool isVecListFourQHWordIndexed() const { 1275 if (!isDoubleSpacedVectorIndexed()) return false; 1276 return VectorList.Count == 4 && VectorList.LaneIndex <= 3; 1277 } 1278 1279 bool isVecListFourDWordIndexed() const { 1280 if (!isSingleSpacedVectorIndexed()) return 
false; 1281 return VectorList.Count == 4 && VectorList.LaneIndex <= 1; 1282 } 1283 1284 bool isVectorIndex8() const { 1285 if (Kind != k_VectorIndex) return false; 1286 return VectorIndex.Val < 8; 1287 } 1288 bool isVectorIndex16() const { 1289 if (Kind != k_VectorIndex) return false; 1290 return VectorIndex.Val < 4; 1291 } 1292 bool isVectorIndex32() const { 1293 if (Kind != k_VectorIndex) return false; 1294 return VectorIndex.Val < 2; 1295 } 1296 1297 bool isNEONi8splat() const { 1298 if (!isImm()) return false; 1299 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1300 // Must be a constant. 1301 if (!CE) return false; 1302 int64_t Value = CE->getValue(); 1303 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 1304 // value. 1305 return Value >= 0 && Value < 256; 1306 } 1307 1308 bool isNEONi16splat() const { 1309 if (!isImm()) return false; 1310 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1311 // Must be a constant. 1312 if (!CE) return false; 1313 int64_t Value = CE->getValue(); 1314 // i16 value in the range [0,255] or [0x0100, 0xff00] 1315 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 1316 } 1317 1318 bool isNEONi32splat() const { 1319 if (!isImm()) return false; 1320 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1321 // Must be a constant. 1322 if (!CE) return false; 1323 int64_t Value = CE->getValue(); 1324 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 1325 return (Value >= 0 && Value < 256) || 1326 (Value >= 0x0100 && Value <= 0xff00) || 1327 (Value >= 0x010000 && Value <= 0xff0000) || 1328 (Value >= 0x01000000 && Value <= 0xff000000); 1329 } 1330 1331 bool isNEONi32vmov() const { 1332 if (!isImm()) return false; 1333 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1334 // Must be a constant. 
1335 if (!CE) return false; 1336 int64_t Value = CE->getValue(); 1337 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1338 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1339 return (Value >= 0 && Value < 256) || 1340 (Value >= 0x0100 && Value <= 0xff00) || 1341 (Value >= 0x010000 && Value <= 0xff0000) || 1342 (Value >= 0x01000000 && Value <= 0xff000000) || 1343 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1344 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1345 } 1346 bool isNEONi32vmovNeg() const { 1347 if (!isImm()) return false; 1348 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1349 // Must be a constant. 1350 if (!CE) return false; 1351 int64_t Value = ~CE->getValue(); 1352 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1353 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1354 return (Value >= 0 && Value < 256) || 1355 (Value >= 0x0100 && Value <= 0xff00) || 1356 (Value >= 0x010000 && Value <= 0xff0000) || 1357 (Value >= 0x01000000 && Value <= 0xff000000) || 1358 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1359 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1360 } 1361 1362 bool isNEONi64splat() const { 1363 if (!isImm()) return false; 1364 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1365 // Must be a constant. 1366 if (!CE) return false; 1367 uint64_t Value = CE->getValue(); 1368 // i64 value with each byte being either 0 or 0xff. 1369 for (unsigned i = 0; i < 8; ++i) 1370 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1371 return true; 1372 } 1373 1374 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1375 // Add as immediates when possible. Null MCExpr = 0. 
1376 if (Expr == 0) 1377 Inst.addOperand(MCOperand::CreateImm(0)); 1378 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1379 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1380 else 1381 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1382 } 1383 1384 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1385 assert(N == 2 && "Invalid number of operands!"); 1386 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1387 unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR; 1388 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1389 } 1390 1391 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1392 assert(N == 1 && "Invalid number of operands!"); 1393 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1394 } 1395 1396 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1397 assert(N == 1 && "Invalid number of operands!"); 1398 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1399 } 1400 1401 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1402 assert(N == 1 && "Invalid number of operands!"); 1403 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1404 } 1405 1406 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1407 assert(N == 1 && "Invalid number of operands!"); 1408 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1409 } 1410 1411 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1412 assert(N == 1 && "Invalid number of operands!"); 1413 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1414 } 1415 1416 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1417 assert(N == 1 && "Invalid number of operands!"); 1418 Inst.addOperand(MCOperand::CreateReg(getReg())); 1419 } 1420 1421 void addRegOperands(MCInst &Inst, unsigned N) const { 1422 assert(N == 1 && "Invalid number of operands!"); 1423 Inst.addOperand(MCOperand::CreateReg(getReg())); 1424 } 1425 1426 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1427 assert(N == 3 && 
"Invalid number of operands!"); 1428 assert(isRegShiftedReg() && 1429 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1430 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1431 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1432 Inst.addOperand(MCOperand::CreateImm( 1433 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1434 } 1435 1436 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1437 assert(N == 2 && "Invalid number of operands!"); 1438 assert(isRegShiftedImm() && 1439 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1440 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1441 Inst.addOperand(MCOperand::CreateImm( 1442 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1443 } 1444 1445 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1446 assert(N == 1 && "Invalid number of operands!"); 1447 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1448 ShifterImm.Imm)); 1449 } 1450 1451 void addRegListOperands(MCInst &Inst, unsigned N) const { 1452 assert(N == 1 && "Invalid number of operands!"); 1453 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1454 for (SmallVectorImpl<unsigned>::const_iterator 1455 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1456 Inst.addOperand(MCOperand::CreateReg(*I)); 1457 } 1458 1459 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1460 addRegListOperands(Inst, N); 1461 } 1462 1463 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1464 addRegListOperands(Inst, N); 1465 } 1466 1467 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1468 assert(N == 1 && "Invalid number of operands!"); 1469 // Encoded as val>>3. The printer handles display as 8, 16, 24. 
1470 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1471 } 1472 1473 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1474 assert(N == 1 && "Invalid number of operands!"); 1475 // Munge the lsb/width into a bitfield mask. 1476 unsigned lsb = Bitfield.LSB; 1477 unsigned width = Bitfield.Width; 1478 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 1479 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1480 (32 - (lsb + width))); 1481 Inst.addOperand(MCOperand::CreateImm(Mask)); 1482 } 1483 1484 void addImmOperands(MCInst &Inst, unsigned N) const { 1485 assert(N == 1 && "Invalid number of operands!"); 1486 addExpr(Inst, getImm()); 1487 } 1488 1489 void addFBits16Operands(MCInst &Inst, unsigned N) const { 1490 assert(N == 1 && "Invalid number of operands!"); 1491 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1492 Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue())); 1493 } 1494 1495 void addFBits32Operands(MCInst &Inst, unsigned N) const { 1496 assert(N == 1 && "Invalid number of operands!"); 1497 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1498 Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue())); 1499 } 1500 1501 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1502 assert(N == 1 && "Invalid number of operands!"); 1503 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1504 int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue())); 1505 Inst.addOperand(MCOperand::CreateImm(Val)); 1506 } 1507 1508 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1509 assert(N == 1 && "Invalid number of operands!"); 1510 // FIXME: We really want to scale the value here, but the LDRD/STRD 1511 // instruction don't encode operands that way yet. 
1512 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1513 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1514 } 1515 1516 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1517 assert(N == 1 && "Invalid number of operands!"); 1518 // The immediate is scaled by four in the encoding and is stored 1519 // in the MCInst as such. Lop off the low two bits here. 1520 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1521 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1522 } 1523 1524 void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const { 1525 assert(N == 1 && "Invalid number of operands!"); 1526 // The immediate is scaled by four in the encoding and is stored 1527 // in the MCInst as such. Lop off the low two bits here. 1528 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1529 Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4))); 1530 } 1531 1532 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1533 assert(N == 1 && "Invalid number of operands!"); 1534 // The immediate is scaled by four in the encoding and is stored 1535 // in the MCInst as such. Lop off the low two bits here. 1536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1537 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1538 } 1539 1540 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1541 assert(N == 1 && "Invalid number of operands!"); 1542 // The constant encodes as the immediate-1, and we store in the instruction 1543 // the bits as encoded, so subtract off one here. 1544 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1545 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1546 } 1547 1548 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1549 assert(N == 1 && "Invalid number of operands!"); 1550 // The constant encodes as the immediate-1, and we store in the instruction 1551 // the bits as encoded, so subtract off one here. 
1552 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1553 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1554 } 1555 1556 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1557 assert(N == 1 && "Invalid number of operands!"); 1558 // The constant encodes as the immediate, except for 32, which encodes as 1559 // zero. 1560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1561 unsigned Imm = CE->getValue(); 1562 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1563 } 1564 1565 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1566 assert(N == 1 && "Invalid number of operands!"); 1567 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1568 // the instruction as well. 1569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1570 int Val = CE->getValue(); 1571 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val)); 1572 } 1573 1574 void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const { 1575 assert(N == 1 && "Invalid number of operands!"); 1576 // The operand is actually a t2_so_imm, but we have its bitwise 1577 // negation in the assembly source, so twiddle it here. 1578 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1579 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1580 } 1581 1582 void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const { 1583 assert(N == 1 && "Invalid number of operands!"); 1584 // The operand is actually a t2_so_imm, but we have its 1585 // negation in the assembly source, so twiddle it here. 1586 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1587 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1588 } 1589 1590 void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const { 1591 assert(N == 1 && "Invalid number of operands!"); 1592 // The operand is actually an imm0_4095, but we have its 1593 // negation in the assembly source, so twiddle it here. 
1594 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1595 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1596 } 1597 1598 void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const { 1599 assert(N == 1 && "Invalid number of operands!"); 1600 // The operand is actually a so_imm, but we have its bitwise 1601 // negation in the assembly source, so twiddle it here. 1602 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1603 Inst.addOperand(MCOperand::CreateImm(~CE->getValue())); 1604 } 1605 1606 void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const { 1607 assert(N == 1 && "Invalid number of operands!"); 1608 // The operand is actually a so_imm, but we have its 1609 // negation in the assembly source, so twiddle it here. 1610 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1611 Inst.addOperand(MCOperand::CreateImm(-CE->getValue())); 1612 } 1613 1614 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1615 assert(N == 1 && "Invalid number of operands!"); 1616 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1617 } 1618 1619 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1620 assert(N == 1 && "Invalid number of operands!"); 1621 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1622 } 1623 1624 void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const { 1625 assert(N == 1 && "Invalid number of operands!"); 1626 int32_t Imm = Memory.OffsetImm->getValue(); 1627 // FIXME: Handle #-0 1628 if (Imm == INT32_MIN) Imm = 0; 1629 Inst.addOperand(MCOperand::CreateImm(Imm)); 1630 } 1631 1632 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1633 assert(N == 2 && "Invalid number of operands!"); 1634 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1635 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1636 } 1637 1638 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1639 assert(N == 3 && "Invalid number of operands!"); 1640 
int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1641 if (!Memory.OffsetRegNum) { 1642 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1643 // Special case for #-0 1644 if (Val == INT32_MIN) Val = 0; 1645 if (Val < 0) Val = -Val; 1646 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1647 } else { 1648 // For register offset, we encode the shift type and negation flag 1649 // here. 1650 Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1651 Memory.ShiftImm, Memory.ShiftType); 1652 } 1653 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1654 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1655 Inst.addOperand(MCOperand::CreateImm(Val)); 1656 } 1657 1658 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1659 assert(N == 2 && "Invalid number of operands!"); 1660 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1661 assert(CE && "non-constant AM2OffsetImm operand!"); 1662 int32_t Val = CE->getValue(); 1663 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1664 // Special case for #-0 1665 if (Val == INT32_MIN) Val = 0; 1666 if (Val < 0) Val = -Val; 1667 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1668 Inst.addOperand(MCOperand::CreateReg(0)); 1669 Inst.addOperand(MCOperand::CreateImm(Val)); 1670 } 1671 1672 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1673 assert(N == 3 && "Invalid number of operands!"); 1674 // If we have an immediate that's not a constant, treat it as a label 1675 // reference needing a fixup. If it is a constant, it's something else 1676 // and we reject it. 1677 if (isImm()) { 1678 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1679 Inst.addOperand(MCOperand::CreateReg(0)); 1680 Inst.addOperand(MCOperand::CreateImm(0)); 1681 return; 1682 } 1683 1684 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1685 if (!Memory.OffsetRegNum) { 1686 ARM_AM::AddrOpc AddSub = Val < 0 ? 
ARM_AM::sub : ARM_AM::add; 1687 // Special case for #-0 1688 if (Val == INT32_MIN) Val = 0; 1689 if (Val < 0) Val = -Val; 1690 Val = ARM_AM::getAM3Opc(AddSub, Val); 1691 } else { 1692 // For register offset, we encode the shift type and negation flag 1693 // here. 1694 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1695 } 1696 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1697 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1698 Inst.addOperand(MCOperand::CreateImm(Val)); 1699 } 1700 1701 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1702 assert(N == 2 && "Invalid number of operands!"); 1703 if (Kind == k_PostIndexRegister) { 1704 int32_t Val = 1705 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0); 1706 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1707 Inst.addOperand(MCOperand::CreateImm(Val)); 1708 return; 1709 } 1710 1711 // Constant offset. 1712 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1713 int32_t Val = CE->getValue(); 1714 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1715 // Special case for #-0 1716 if (Val == INT32_MIN) Val = 0; 1717 if (Val < 0) Val = -Val; 1718 Val = ARM_AM::getAM3Opc(AddSub, Val); 1719 Inst.addOperand(MCOperand::CreateReg(0)); 1720 Inst.addOperand(MCOperand::CreateImm(Val)); 1721 } 1722 1723 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1724 assert(N == 2 && "Invalid number of operands!"); 1725 // If we have an immediate that's not a constant, treat it as a label 1726 // reference needing a fixup. If it is a constant, it's something else 1727 // and we reject it. 1728 if (isImm()) { 1729 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1730 Inst.addOperand(MCOperand::CreateImm(0)); 1731 return; 1732 } 1733 1734 // The lower two bits are always zero and as such are not encoded. 1735 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1736 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1737 // Special case for #-0 1738 if (Val == INT32_MIN) Val = 0; 1739 if (Val < 0) Val = -Val; 1740 Val = ARM_AM::getAM5Opc(AddSub, Val); 1741 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1742 Inst.addOperand(MCOperand::CreateImm(Val)); 1743 } 1744 1745 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1746 assert(N == 2 && "Invalid number of operands!"); 1747 // If we have an immediate that's not a constant, treat it as a label 1748 // reference needing a fixup. If it is a constant, it's something else 1749 // and we reject it. 1750 if (isImm()) { 1751 Inst.addOperand(MCOperand::CreateExpr(getImm())); 1752 Inst.addOperand(MCOperand::CreateImm(0)); 1753 return; 1754 } 1755 1756 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1757 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1758 Inst.addOperand(MCOperand::CreateImm(Val)); 1759 } 1760 1761 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1762 assert(N == 2 && "Invalid number of operands!"); 1763 // The lower two bits are always zero and as such are not encoded. 1764 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1765 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1766 Inst.addOperand(MCOperand::CreateImm(Val)); 1767 } 1768 1769 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1770 assert(N == 2 && "Invalid number of operands!"); 1771 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1772 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1773 Inst.addOperand(MCOperand::CreateImm(Val)); 1774 } 1775 1776 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1777 addMemImm8OffsetOperands(Inst, N); 1778 } 1779 1780 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1781 addMemImm8OffsetOperands(Inst, N); 1782 } 1783 1784 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1785 assert(N == 2 && "Invalid number of operands!"); 1786 // If this is an immediate, it's a label reference. 1787 if (isImm()) { 1788 addExpr(Inst, getImm()); 1789 Inst.addOperand(MCOperand::CreateImm(0)); 1790 return; 1791 } 1792 1793 // Otherwise, it's a normal memory reg+offset. 1794 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1795 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1796 Inst.addOperand(MCOperand::CreateImm(Val)); 1797 } 1798 1799 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1800 assert(N == 2 && "Invalid number of operands!"); 1801 // If this is an immediate, it's a label reference. 1802 if (isImm()) { 1803 addExpr(Inst, getImm()); 1804 Inst.addOperand(MCOperand::CreateImm(0)); 1805 return; 1806 } 1807 1808 // Otherwise, it's a normal memory reg+offset. 1809 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1810 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1811 Inst.addOperand(MCOperand::CreateImm(Val)); 1812 } 1813 1814 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1815 assert(N == 2 && "Invalid number of operands!"); 1816 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1817 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1818 } 1819 1820 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1821 assert(N == 2 && "Invalid number of operands!"); 1822 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1823 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1824 } 1825 1826 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1827 assert(N == 3 && "Invalid number of operands!"); 1828 unsigned Val = 1829 ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1830 Memory.ShiftImm, Memory.ShiftType); 1831 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1832 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1833 Inst.addOperand(MCOperand::CreateImm(Val)); 1834 } 1835 1836 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1837 assert(N == 3 && "Invalid number of operands!"); 1838 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1839 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1840 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1841 } 1842 1843 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1844 assert(N == 2 && "Invalid number of operands!"); 1845 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1846 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1847 } 1848 1849 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1850 assert(N == 2 && "Invalid number of operands!"); 1851 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1852 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1853 Inst.addOperand(MCOperand::CreateImm(Val)); 1854 } 1855 1856 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1857 assert(N == 2 && "Invalid number of operands!"); 1858 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1859 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1860 Inst.addOperand(MCOperand::CreateImm(Val)); 1861 } 1862 1863 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1864 assert(N == 2 && "Invalid number of operands!"); 1865 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1866 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1867 Inst.addOperand(MCOperand::CreateImm(Val)); 1868 } 1869 1870 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1871 assert(N == 2 && "Invalid number of operands!"); 1872 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1873 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1874 Inst.addOperand(MCOperand::CreateImm(Val)); 1875 } 1876 1877 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1878 assert(N == 1 && "Invalid number of operands!"); 1879 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1880 assert(CE && "non-constant post-idx-imm8 operand!"); 1881 int Imm = CE->getValue(); 1882 bool isAdd = Imm >= 0; 1883 if (Imm == INT32_MIN) Imm = 0; 1884 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1885 Inst.addOperand(MCOperand::CreateImm(Imm)); 1886 } 1887 1888 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1889 assert(N == 1 && "Invalid number of operands!"); 1890 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1891 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1892 int Imm = CE->getValue(); 1893 bool isAdd = Imm >= 0; 1894 if (Imm == INT32_MIN) Imm = 0; 1895 // Immediate is scaled by 4. 1896 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1897 Inst.addOperand(MCOperand::CreateImm(Imm)); 1898 } 1899 1900 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1901 assert(N == 2 && "Invalid number of operands!"); 1902 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1903 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1904 } 1905 1906 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1907 assert(N == 2 && "Invalid number of operands!"); 1908 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1909 // The sign, shift type, and shift amount are encoded in a single operand 1910 // using the AM2 encoding helpers. 1911 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1912 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1913 PostIdxReg.ShiftTy); 1914 Inst.addOperand(MCOperand::CreateImm(Imm)); 1915 } 1916 1917 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1918 assert(N == 1 && "Invalid number of operands!"); 1919 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1920 } 1921 1922 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1923 assert(N == 1 && "Invalid number of operands!"); 1924 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1925 } 1926 1927 void addVecListOperands(MCInst &Inst, unsigned N) const { 1928 assert(N == 1 && "Invalid number of operands!"); 1929 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1930 } 1931 1932 void addVecListIndexedOperands(MCInst &Inst, unsigned N) const { 1933 assert(N == 2 && "Invalid number of operands!"); 1934 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1935 Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex)); 1936 } 1937 1938 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1939 assert(N == 1 && "Invalid number of operands!"); 1940 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1941 } 1942 1943 void addVectorIndex16Operands(MCInst &Inst, unsigned 
N) const { 1944 assert(N == 1 && "Invalid number of operands!"); 1945 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1946 } 1947 1948 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1949 assert(N == 1 && "Invalid number of operands!"); 1950 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1951 } 1952 1953 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1954 assert(N == 1 && "Invalid number of operands!"); 1955 // The immediate encodes the type of constant as well as the value. 1956 // Mask in that this is an i8 splat. 1957 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1958 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1959 } 1960 1961 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1962 assert(N == 1 && "Invalid number of operands!"); 1963 // The immediate encodes the type of constant as well as the value. 1964 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1965 unsigned Value = CE->getValue(); 1966 if (Value >= 256) 1967 Value = (Value >> 8) | 0xa00; 1968 else 1969 Value |= 0x800; 1970 Inst.addOperand(MCOperand::CreateImm(Value)); 1971 } 1972 1973 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1974 assert(N == 1 && "Invalid number of operands!"); 1975 // The immediate encodes the type of constant as well as the value. 1976 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1977 unsigned Value = CE->getValue(); 1978 if (Value >= 256 && Value <= 0xff00) 1979 Value = (Value >> 8) | 0x200; 1980 else if (Value > 0xffff && Value <= 0xff0000) 1981 Value = (Value >> 16) | 0x400; 1982 else if (Value > 0xffffff) 1983 Value = (Value >> 24) | 0x600; 1984 Inst.addOperand(MCOperand::CreateImm(Value)); 1985 } 1986 1987 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1988 assert(N == 1 && "Invalid number of operands!"); 1989 // The immediate encodes the type of constant as well as the value. 
1990 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1991 unsigned Value = CE->getValue(); 1992 if (Value >= 256 && Value <= 0xffff) 1993 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1994 else if (Value > 0xffff && Value <= 0xffffff) 1995 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1996 else if (Value > 0xffffff) 1997 Value = (Value >> 24) | 0x600; 1998 Inst.addOperand(MCOperand::CreateImm(Value)); 1999 } 2000 2001 void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const { 2002 assert(N == 1 && "Invalid number of operands!"); 2003 // The immediate encodes the type of constant as well as the value. 2004 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 2005 unsigned Value = ~CE->getValue(); 2006 if (Value >= 256 && Value <= 0xffff) 2007 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 2008 else if (Value > 0xffff && Value <= 0xffffff) 2009 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 2010 else if (Value > 0xffffff) 2011 Value = (Value >> 24) | 0x600; 2012 Inst.addOperand(MCOperand::CreateImm(Value)); 2013 } 2014 2015 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 2016 assert(N == 1 && "Invalid number of operands!"); 2017 // The immediate encodes the type of constant as well as the value. 
2018 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 2019 uint64_t Value = CE->getValue(); 2020 unsigned Imm = 0; 2021 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 2022 Imm |= (Value & 1) << i; 2023 } 2024 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 2025 } 2026 2027 virtual void print(raw_ostream &OS) const; 2028 2029 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 2030 ARMOperand *Op = new ARMOperand(k_ITCondMask); 2031 Op->ITMask.Mask = Mask; 2032 Op->StartLoc = S; 2033 Op->EndLoc = S; 2034 return Op; 2035 } 2036 2037 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 2038 ARMOperand *Op = new ARMOperand(k_CondCode); 2039 Op->CC.Val = CC; 2040 Op->StartLoc = S; 2041 Op->EndLoc = S; 2042 return Op; 2043 } 2044 2045 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 2046 ARMOperand *Op = new ARMOperand(k_CoprocNum); 2047 Op->Cop.Val = CopVal; 2048 Op->StartLoc = S; 2049 Op->EndLoc = S; 2050 return Op; 2051 } 2052 2053 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 2054 ARMOperand *Op = new ARMOperand(k_CoprocReg); 2055 Op->Cop.Val = CopVal; 2056 Op->StartLoc = S; 2057 Op->EndLoc = S; 2058 return Op; 2059 } 2060 2061 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 2062 ARMOperand *Op = new ARMOperand(k_CoprocOption); 2063 Op->Cop.Val = Val; 2064 Op->StartLoc = S; 2065 Op->EndLoc = E; 2066 return Op; 2067 } 2068 2069 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 2070 ARMOperand *Op = new ARMOperand(k_CCOut); 2071 Op->Reg.RegNum = RegNum; 2072 Op->StartLoc = S; 2073 Op->EndLoc = S; 2074 return Op; 2075 } 2076 2077 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 2078 ARMOperand *Op = new ARMOperand(k_Token); 2079 Op->Tok.Data = Str.data(); 2080 Op->Tok.Length = Str.size(); 2081 Op->StartLoc = S; 2082 Op->EndLoc = S; 2083 return Op; 2084 } 2085 2086 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 2087 ARMOperand *Op = new 
ARMOperand(k_Register); 2088 Op->Reg.RegNum = RegNum; 2089 Op->StartLoc = S; 2090 Op->EndLoc = E; 2091 return Op; 2092 } 2093 2094 static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, 2095 unsigned SrcReg, 2096 unsigned ShiftReg, 2097 unsigned ShiftImm, 2098 SMLoc S, SMLoc E) { 2099 ARMOperand *Op = new ARMOperand(k_ShiftedRegister); 2100 Op->RegShiftedReg.ShiftTy = ShTy; 2101 Op->RegShiftedReg.SrcReg = SrcReg; 2102 Op->RegShiftedReg.ShiftReg = ShiftReg; 2103 Op->RegShiftedReg.ShiftImm = ShiftImm; 2104 Op->StartLoc = S; 2105 Op->EndLoc = E; 2106 return Op; 2107 } 2108 2109 static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, 2110 unsigned SrcReg, 2111 unsigned ShiftImm, 2112 SMLoc S, SMLoc E) { 2113 ARMOperand *Op = new ARMOperand(k_ShiftedImmediate); 2114 Op->RegShiftedImm.ShiftTy = ShTy; 2115 Op->RegShiftedImm.SrcReg = SrcReg; 2116 Op->RegShiftedImm.ShiftImm = ShiftImm; 2117 Op->StartLoc = S; 2118 Op->EndLoc = E; 2119 return Op; 2120 } 2121 2122 static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm, 2123 SMLoc S, SMLoc E) { 2124 ARMOperand *Op = new ARMOperand(k_ShifterImmediate); 2125 Op->ShifterImm.isASR = isASR; 2126 Op->ShifterImm.Imm = Imm; 2127 Op->StartLoc = S; 2128 Op->EndLoc = E; 2129 return Op; 2130 } 2131 2132 static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) { 2133 ARMOperand *Op = new ARMOperand(k_RotateImmediate); 2134 Op->RotImm.Imm = Imm; 2135 Op->StartLoc = S; 2136 Op->EndLoc = E; 2137 return Op; 2138 } 2139 2140 static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width, 2141 SMLoc S, SMLoc E) { 2142 ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor); 2143 Op->Bitfield.LSB = LSB; 2144 Op->Bitfield.Width = Width; 2145 Op->StartLoc = S; 2146 Op->EndLoc = E; 2147 return Op; 2148 } 2149 2150 static ARMOperand * 2151 CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs, 2152 SMLoc StartLoc, SMLoc EndLoc) { 2153 KindTy Kind = k_RegisterList; 2154 2155 if 
(ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first)) 2156 Kind = k_DPRRegisterList; 2157 else if (ARMMCRegisterClasses[ARM::SPRRegClassID]. 2158 contains(Regs.front().first)) 2159 Kind = k_SPRRegisterList; 2160 2161 ARMOperand *Op = new ARMOperand(Kind); 2162 for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator 2163 I = Regs.begin(), E = Regs.end(); I != E; ++I) 2164 Op->Registers.push_back(I->first); 2165 array_pod_sort(Op->Registers.begin(), Op->Registers.end()); 2166 Op->StartLoc = StartLoc; 2167 Op->EndLoc = EndLoc; 2168 return Op; 2169 } 2170 2171 static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count, 2172 bool isDoubleSpaced, SMLoc S, SMLoc E) { 2173 ARMOperand *Op = new ARMOperand(k_VectorList); 2174 Op->VectorList.RegNum = RegNum; 2175 Op->VectorList.Count = Count; 2176 Op->VectorList.isDoubleSpaced = isDoubleSpaced; 2177 Op->StartLoc = S; 2178 Op->EndLoc = E; 2179 return Op; 2180 } 2181 2182 static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count, 2183 bool isDoubleSpaced, 2184 SMLoc S, SMLoc E) { 2185 ARMOperand *Op = new ARMOperand(k_VectorListAllLanes); 2186 Op->VectorList.RegNum = RegNum; 2187 Op->VectorList.Count = Count; 2188 Op->VectorList.isDoubleSpaced = isDoubleSpaced; 2189 Op->StartLoc = S; 2190 Op->EndLoc = E; 2191 return Op; 2192 } 2193 2194 static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count, 2195 unsigned Index, 2196 bool isDoubleSpaced, 2197 SMLoc S, SMLoc E) { 2198 ARMOperand *Op = new ARMOperand(k_VectorListIndexed); 2199 Op->VectorList.RegNum = RegNum; 2200 Op->VectorList.Count = Count; 2201 Op->VectorList.LaneIndex = Index; 2202 Op->VectorList.isDoubleSpaced = isDoubleSpaced; 2203 Op->StartLoc = S; 2204 Op->EndLoc = E; 2205 return Op; 2206 } 2207 2208 static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, 2209 MCContext &Ctx) { 2210 ARMOperand *Op = new ARMOperand(k_VectorIndex); 2211 Op->VectorIndex.Val = Idx; 2212 Op->StartLoc = S; 
2213 Op->EndLoc = E; 2214 return Op; 2215 } 2216 2217 static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) { 2218 ARMOperand *Op = new ARMOperand(k_Immediate); 2219 Op->Imm.Val = Val; 2220 Op->StartLoc = S; 2221 Op->EndLoc = E; 2222 return Op; 2223 } 2224 2225 static ARMOperand *CreateMem(unsigned BaseRegNum, 2226 const MCConstantExpr *OffsetImm, 2227 unsigned OffsetRegNum, 2228 ARM_AM::ShiftOpc ShiftType, 2229 unsigned ShiftImm, 2230 unsigned Alignment, 2231 bool isNegative, 2232 SMLoc S, SMLoc E) { 2233 ARMOperand *Op = new ARMOperand(k_Memory); 2234 Op->Memory.BaseRegNum = BaseRegNum; 2235 Op->Memory.OffsetImm = OffsetImm; 2236 Op->Memory.OffsetRegNum = OffsetRegNum; 2237 Op->Memory.ShiftType = ShiftType; 2238 Op->Memory.ShiftImm = ShiftImm; 2239 Op->Memory.Alignment = Alignment; 2240 Op->Memory.isNegative = isNegative; 2241 Op->StartLoc = S; 2242 Op->EndLoc = E; 2243 return Op; 2244 } 2245 2246 static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd, 2247 ARM_AM::ShiftOpc ShiftTy, 2248 unsigned ShiftImm, 2249 SMLoc S, SMLoc E) { 2250 ARMOperand *Op = new ARMOperand(k_PostIndexRegister); 2251 Op->PostIdxReg.RegNum = RegNum; 2252 Op->PostIdxReg.isAdd = isAdd; 2253 Op->PostIdxReg.ShiftTy = ShiftTy; 2254 Op->PostIdxReg.ShiftImm = ShiftImm; 2255 Op->StartLoc = S; 2256 Op->EndLoc = E; 2257 return Op; 2258 } 2259 2260 static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) { 2261 ARMOperand *Op = new ARMOperand(k_MemBarrierOpt); 2262 Op->MBOpt.Val = Opt; 2263 Op->StartLoc = S; 2264 Op->EndLoc = S; 2265 return Op; 2266 } 2267 2268 static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) { 2269 ARMOperand *Op = new ARMOperand(k_ProcIFlags); 2270 Op->IFlags.Val = IFlags; 2271 Op->StartLoc = S; 2272 Op->EndLoc = S; 2273 return Op; 2274 } 2275 2276 static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) { 2277 ARMOperand *Op = new ARMOperand(k_MSRMask); 2278 Op->MMask.Val = MMask; 2279 Op->StartLoc = S; 2280 Op->EndLoc = S; 
2281 return Op; 2282 } 2283 }; 2284 2285 } // end anonymous namespace. 2286 2287 void ARMOperand::print(raw_ostream &OS) const { 2288 switch (Kind) { 2289 case k_CondCode: 2290 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 2291 break; 2292 case k_CCOut: 2293 OS << "<ccout " << getReg() << ">"; 2294 break; 2295 case k_ITCondMask: { 2296 static const char *MaskStr[] = { 2297 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", 2298 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" 2299 }; 2300 assert((ITMask.Mask & 0xf) == ITMask.Mask); 2301 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 2302 break; 2303 } 2304 case k_CoprocNum: 2305 OS << "<coprocessor number: " << getCoproc() << ">"; 2306 break; 2307 case k_CoprocReg: 2308 OS << "<coprocessor register: " << getCoproc() << ">"; 2309 break; 2310 case k_CoprocOption: 2311 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 2312 break; 2313 case k_MSRMask: 2314 OS << "<mask: " << getMSRMask() << ">"; 2315 break; 2316 case k_Immediate: 2317 getImm()->print(OS); 2318 break; 2319 case k_MemBarrierOpt: 2320 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 2321 break; 2322 case k_Memory: 2323 OS << "<memory " 2324 << " base:" << Memory.BaseRegNum; 2325 OS << ">"; 2326 break; 2327 case k_PostIndexRegister: 2328 OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-") 2329 << PostIdxReg.RegNum; 2330 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 2331 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 2332 << PostIdxReg.ShiftImm; 2333 OS << ">"; 2334 break; 2335 case k_ProcIFlags: { 2336 OS << "<ARM_PROC::"; 2337 unsigned IFlags = getProcIFlags(); 2338 for (int i=2; i >= 0; --i) 2339 if (IFlags & (1 << i)) 2340 OS << ARM_PROC::IFlagsToString(1 << i); 2341 OS << ">"; 2342 break; 2343 } 2344 case k_Register: 2345 OS << "<register " << getReg() << ">"; 2346 break; 2347 case k_ShifterImmediate: 2348 OS << "<shift " << (ShifterImm.isASR ? 
"asr" : "lsl") 2349 << " #" << ShifterImm.Imm << ">"; 2350 break; 2351 case k_ShiftedRegister: 2352 OS << "<so_reg_reg " 2353 << RegShiftedReg.SrcReg << " " 2354 << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) 2355 << " " << RegShiftedReg.ShiftReg << ">"; 2356 break; 2357 case k_ShiftedImmediate: 2358 OS << "<so_reg_imm " 2359 << RegShiftedImm.SrcReg << " " 2360 << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) 2361 << " #" << RegShiftedImm.ShiftImm << ">"; 2362 break; 2363 case k_RotateImmediate: 2364 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 2365 break; 2366 case k_BitfieldDescriptor: 2367 OS << "<bitfield " << "lsb: " << Bitfield.LSB 2368 << ", width: " << Bitfield.Width << ">"; 2369 break; 2370 case k_RegisterList: 2371 case k_DPRRegisterList: 2372 case k_SPRRegisterList: { 2373 OS << "<register_list "; 2374 2375 const SmallVectorImpl<unsigned> &RegList = getRegList(); 2376 for (SmallVectorImpl<unsigned>::const_iterator 2377 I = RegList.begin(), E = RegList.end(); I != E; ) { 2378 OS << *I; 2379 if (++I < E) OS << ", "; 2380 } 2381 2382 OS << ">"; 2383 break; 2384 } 2385 case k_VectorList: 2386 OS << "<vector_list " << VectorList.Count << " * " 2387 << VectorList.RegNum << ">"; 2388 break; 2389 case k_VectorListAllLanes: 2390 OS << "<vector_list(all lanes) " << VectorList.Count << " * " 2391 << VectorList.RegNum << ">"; 2392 break; 2393 case k_VectorListIndexed: 2394 OS << "<vector_list(lane " << VectorList.LaneIndex << ") " 2395 << VectorList.Count << " * " << VectorList.RegNum << ">"; 2396 break; 2397 case k_Token: 2398 OS << "'" << getToken() << "'"; 2399 break; 2400 case k_VectorIndex: 2401 OS << "<vectorindex " << getVectorIndex() << ">"; 2402 break; 2403 } 2404 } 2405 2406 /// @name Auto-generated Match Functions 2407 /// { 2408 2409 static unsigned MatchRegisterName(StringRef Name); 2410 2411 /// } 2412 2413 bool ARMAsmParser::ParseRegister(unsigned &RegNo, 2414 SMLoc &StartLoc, SMLoc &EndLoc) { 2415 StartLoc = Parser.getTok().getLoc(); 2416 
RegNo = tryParseRegister(); 2417 EndLoc = Parser.getTok().getLoc(); 2418 2419 return (RegNo == (unsigned)-1); 2420 } 2421 2422 /// Try to parse a register name. The token must be an Identifier when called, 2423 /// and if it is a register name the token is eaten and the register number is 2424 /// returned. Otherwise return -1. 2425 /// 2426 int ARMAsmParser::tryParseRegister() { 2427 const AsmToken &Tok = Parser.getTok(); 2428 if (Tok.isNot(AsmToken::Identifier)) return -1; 2429 2430 std::string lowerCase = Tok.getString().lower(); 2431 unsigned RegNum = MatchRegisterName(lowerCase); 2432 if (!RegNum) { 2433 RegNum = StringSwitch<unsigned>(lowerCase) 2434 .Case("r13", ARM::SP) 2435 .Case("r14", ARM::LR) 2436 .Case("r15", ARM::PC) 2437 .Case("ip", ARM::R12) 2438 // Additional register name aliases for 'gas' compatibility. 2439 .Case("a1", ARM::R0) 2440 .Case("a2", ARM::R1) 2441 .Case("a3", ARM::R2) 2442 .Case("a4", ARM::R3) 2443 .Case("v1", ARM::R4) 2444 .Case("v2", ARM::R5) 2445 .Case("v3", ARM::R6) 2446 .Case("v4", ARM::R7) 2447 .Case("v5", ARM::R8) 2448 .Case("v6", ARM::R9) 2449 .Case("v7", ARM::R10) 2450 .Case("v8", ARM::R11) 2451 .Case("sb", ARM::R9) 2452 .Case("sl", ARM::R10) 2453 .Case("fp", ARM::R11) 2454 .Default(0); 2455 } 2456 if (!RegNum) { 2457 // Check for aliases registered via .req. Canonicalize to lower case. 2458 // That's more consistent since register names are case insensitive, and 2459 // it's how the original entry was passed in from MC/MCParser/AsmParser. 2460 StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase); 2461 // If no match, return failure. 2462 if (Entry == RegisterReqs.end()) 2463 return -1; 2464 Parser.Lex(); // Eat identifier token. 2465 return Entry->getValue(); 2466 } 2467 2468 Parser.Lex(); // Eat identifier token. 2469 2470 return RegNum; 2471 } 2472 2473 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2474 // If a recoverable error occurs, return 1. 
If an irrecoverable error 2475 // occurs, return -1. An irrecoverable error is one where tokens have been 2476 // consumed in the process of trying to parse the shifter (i.e., when it is 2477 // indeed a shifter operand, but malformed). 2478 int ARMAsmParser::tryParseShiftRegister( 2479 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2480 SMLoc S = Parser.getTok().getLoc(); 2481 const AsmToken &Tok = Parser.getTok(); 2482 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2483 2484 std::string lowerCase = Tok.getString().lower(); 2485 ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase) 2486 .Case("asl", ARM_AM::lsl) 2487 .Case("lsl", ARM_AM::lsl) 2488 .Case("lsr", ARM_AM::lsr) 2489 .Case("asr", ARM_AM::asr) 2490 .Case("ror", ARM_AM::ror) 2491 .Case("rrx", ARM_AM::rrx) 2492 .Default(ARM_AM::no_shift); 2493 2494 if (ShiftTy == ARM_AM::no_shift) 2495 return 1; 2496 2497 Parser.Lex(); // Eat the operator. 2498 2499 // The source register for the shift has already been added to the 2500 // operand list, so we need to pop it off and combine it into the shifted 2501 // register operand instead. 2502 OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val()); 2503 if (!PrevOp->isReg()) 2504 return Error(PrevOp->getStartLoc(), "shift must be of a register"); 2505 int SrcReg = PrevOp->getReg(); 2506 int64_t Imm = 0; 2507 int ShiftReg = 0; 2508 if (ShiftTy == ARM_AM::rrx) { 2509 // RRX Doesn't have an explicit shift amount. The encoder expects 2510 // the shift register to be the same as the source register. Seems odd, 2511 // but OK. 2512 ShiftReg = SrcReg; 2513 } else { 2514 // Figure out if this is shifted by a constant or a register (for non-RRX). 2515 if (Parser.getTok().is(AsmToken::Hash) || 2516 Parser.getTok().is(AsmToken::Dollar)) { 2517 Parser.Lex(); // Eat hash. 
2518 SMLoc ImmLoc = Parser.getTok().getLoc(); 2519 const MCExpr *ShiftExpr = 0; 2520 if (getParser().ParseExpression(ShiftExpr)) { 2521 Error(ImmLoc, "invalid immediate shift value"); 2522 return -1; 2523 } 2524 // The expression must be evaluatable as an immediate. 2525 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr); 2526 if (!CE) { 2527 Error(ImmLoc, "invalid immediate shift value"); 2528 return -1; 2529 } 2530 // Range check the immediate. 2531 // lsl, ror: 0 <= imm <= 31 2532 // lsr, asr: 0 <= imm <= 32 2533 Imm = CE->getValue(); 2534 if (Imm < 0 || 2535 ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) || 2536 ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) { 2537 Error(ImmLoc, "immediate shift value out of range"); 2538 return -1; 2539 } 2540 // shift by zero is a nop. Always send it through as lsl. 2541 // ('as' compatibility) 2542 if (Imm == 0) 2543 ShiftTy = ARM_AM::lsl; 2544 } else if (Parser.getTok().is(AsmToken::Identifier)) { 2545 ShiftReg = tryParseRegister(); 2546 SMLoc L = Parser.getTok().getLoc(); 2547 if (ShiftReg == -1) { 2548 Error (L, "expected immediate or register in shift operand"); 2549 return -1; 2550 } 2551 } else { 2552 Error (Parser.getTok().getLoc(), 2553 "expected immediate or register in shift operand"); 2554 return -1; 2555 } 2556 } 2557 2558 if (ShiftReg && ShiftTy != ARM_AM::rrx) 2559 Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg, 2560 ShiftReg, Imm, 2561 S, Parser.getTok().getLoc())); 2562 else 2563 Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm, 2564 S, Parser.getTok().getLoc())); 2565 2566 return 0; 2567 } 2568 2569 2570 /// Try to parse a register name. The token must be an Identifier when called. 2571 /// If it's a register, an AsmOperand is created. Another AsmOperand is created 2572 /// if there is a "writeback". 'true' if it's not a register. 
2573 /// 2574 /// TODO this is likely to change to allow different register types and or to 2575 /// parse for a specific register type. 2576 bool ARMAsmParser:: 2577 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2578 SMLoc S = Parser.getTok().getLoc(); 2579 int RegNo = tryParseRegister(); 2580 if (RegNo == -1) 2581 return true; 2582 2583 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2584 2585 const AsmToken &ExclaimTok = Parser.getTok(); 2586 if (ExclaimTok.is(AsmToken::Exclaim)) { 2587 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2588 ExclaimTok.getLoc())); 2589 Parser.Lex(); // Eat exclaim token 2590 return false; 2591 } 2592 2593 // Also check for an index operand. This is only legal for vector registers, 2594 // but that'll get caught OK in operand matching, so we don't need to 2595 // explicitly filter everything else out here. 2596 if (Parser.getTok().is(AsmToken::LBrac)) { 2597 SMLoc SIdx = Parser.getTok().getLoc(); 2598 Parser.Lex(); // Eat left bracket token. 2599 2600 const MCExpr *ImmVal; 2601 if (getParser().ParseExpression(ImmVal)) 2602 return true; 2603 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2604 if (!MCE) 2605 return TokError("immediate value expected for vector index"); 2606 2607 SMLoc E = Parser.getTok().getLoc(); 2608 if (Parser.getTok().isNot(AsmToken::RBrac)) 2609 return Error(E, "']' expected"); 2610 2611 Parser.Lex(); // Eat right bracket token. 2612 2613 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2614 SIdx, E, 2615 getContext())); 2616 } 2617 2618 return false; 2619 } 2620 2621 /// MatchCoprocessorOperandName - Try to parse an coprocessor related 2622 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2623 /// "c5", ... 2624 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2625 // Use the same layout as the tablegen'erated register name matcher. 
Ugly, 2626 // but efficient. 2627 switch (Name.size()) { 2628 default: return -1; 2629 case 2: 2630 if (Name[0] != CoprocOp) 2631 return -1; 2632 switch (Name[1]) { 2633 default: return -1; 2634 case '0': return 0; 2635 case '1': return 1; 2636 case '2': return 2; 2637 case '3': return 3; 2638 case '4': return 4; 2639 case '5': return 5; 2640 case '6': return 6; 2641 case '7': return 7; 2642 case '8': return 8; 2643 case '9': return 9; 2644 } 2645 case 3: 2646 if (Name[0] != CoprocOp || Name[1] != '1') 2647 return -1; 2648 switch (Name[2]) { 2649 default: return -1; 2650 case '0': return 10; 2651 case '1': return 11; 2652 case '2': return 12; 2653 case '3': return 13; 2654 case '4': return 14; 2655 case '5': return 15; 2656 } 2657 } 2658 } 2659 2660 /// parseITCondCode - Try to parse a condition code for an IT instruction. 2661 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2662 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2663 SMLoc S = Parser.getTok().getLoc(); 2664 const AsmToken &Tok = Parser.getTok(); 2665 if (!Tok.is(AsmToken::Identifier)) 2666 return MatchOperand_NoMatch; 2667 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2668 .Case("eq", ARMCC::EQ) 2669 .Case("ne", ARMCC::NE) 2670 .Case("hs", ARMCC::HS) 2671 .Case("cs", ARMCC::HS) 2672 .Case("lo", ARMCC::LO) 2673 .Case("cc", ARMCC::LO) 2674 .Case("mi", ARMCC::MI) 2675 .Case("pl", ARMCC::PL) 2676 .Case("vs", ARMCC::VS) 2677 .Case("vc", ARMCC::VC) 2678 .Case("hi", ARMCC::HI) 2679 .Case("ls", ARMCC::LS) 2680 .Case("ge", ARMCC::GE) 2681 .Case("lt", ARMCC::LT) 2682 .Case("gt", ARMCC::GT) 2683 .Case("le", ARMCC::LE) 2684 .Case("al", ARMCC::AL) 2685 .Default(~0U); 2686 if (CC == ~0U) 2687 return MatchOperand_NoMatch; 2688 Parser.Lex(); // Eat the token. 2689 2690 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2691 2692 return MatchOperand_Success; 2693 } 2694 2695 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2696 /// token must be an Identifier when called, and if it is a coprocessor 2697 /// number, the token is eaten and the operand is added to the operand list. 2698 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2699 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2700 SMLoc S = Parser.getTok().getLoc(); 2701 const AsmToken &Tok = Parser.getTok(); 2702 if (Tok.isNot(AsmToken::Identifier)) 2703 return MatchOperand_NoMatch; 2704 2705 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2706 if (Num == -1) 2707 return MatchOperand_NoMatch; 2708 2709 Parser.Lex(); // Eat identifier token. 2710 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2711 return MatchOperand_Success; 2712 } 2713 2714 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2715 /// token must be an Identifier when called, and if it is a coprocessor 2716 /// number, the token is eaten and the operand is added to the operand list. 2717 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2718 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2719 SMLoc S = Parser.getTok().getLoc(); 2720 const AsmToken &Tok = Parser.getTok(); 2721 if (Tok.isNot(AsmToken::Identifier)) 2722 return MatchOperand_NoMatch; 2723 2724 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2725 if (Reg == -1) 2726 return MatchOperand_NoMatch; 2727 2728 Parser.Lex(); // Eat identifier token. 2729 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2730 return MatchOperand_Success; 2731 } 2732 2733 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2734 /// coproc_option : '{' imm0_255 '}' 2735 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2736 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2737 SMLoc S = Parser.getTok().getLoc(); 2738 2739 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2740 if (Parser.getTok().isNot(AsmToken::LCurly)) 2741 return MatchOperand_NoMatch; 2742 Parser.Lex(); // Eat the '{' 2743 2744 const MCExpr *Expr; 2745 SMLoc Loc = Parser.getTok().getLoc(); 2746 if (getParser().ParseExpression(Expr)) { 2747 Error(Loc, "illegal expression"); 2748 return MatchOperand_ParseFail; 2749 } 2750 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2751 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2752 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2753 return MatchOperand_ParseFail; 2754 } 2755 int Val = CE->getValue(); 2756 2757 // Check for and consume the closing '}' 2758 if (Parser.getTok().isNot(AsmToken::RCurly)) 2759 return MatchOperand_ParseFail; 2760 SMLoc E = Parser.getTok().getLoc(); 2761 Parser.Lex(); // Eat the '}' 2762 2763 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2764 return MatchOperand_Success; 2765 } 2766 2767 // For register list parsing, we need to map from raw GPR register numbering 2768 // to the enumeration values. The enumeration values aren't sorted by 2769 // register number due to our using "sp", "lr" and "pc" as canonical names. 2770 static unsigned getNextRegister(unsigned Reg) { 2771 // If this is a GPR, we need to do it manually, otherwise we can rely 2772 // on the sort ordering of the enumeration since the other reg-classes 2773 // are sane. 
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  switch(Reg) {
  default: llvm_unreachable("Invalid GPR number!");
  case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2;
  case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4;
  case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6;
  case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8;
  case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR;
  // Note the successor of PC wraps back around to R0.
  case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0;
  }
}

// Return the low-subreg of a given Q register.
static unsigned getDRegFromQReg(unsigned QReg) {
  // Qn maps to D(2n); the high half is D(2n+1).
  switch (QReg) {
  default: llvm_unreachable("expected a Q register!");
  case ARM::Q0: return ARM::D0;
  case ARM::Q1: return ARM::D2;
  case ARM::Q2: return ARM::D4;
  case ARM::Q3: return ARM::D6;
  case ARM::Q4: return ARM::D8;
  case ARM::Q5: return ARM::D10;
  case ARM::Q6: return ARM::D12;
  case ARM::Q7: return ARM::D14;
  case ARM::Q8: return ARM::D16;
  case ARM::Q9: return ARM::D18;
  case ARM::Q10: return ARM::D20;
  case ARM::Q11: return ARM::D22;
  case ARM::Q12: return ARM::D24;
  case ARM::Q13: return ARM::D26;
  case ARM::Q14: return ARM::D28;
  case ARM::Q15: return ARM::D30;
  }
}

/// Parse a register list. The leading '{' has been spotted (but not eaten)
/// by the caller; on success the whole '{...}' (and any trailing '^') is
/// consumed and a RegList operand is pushed.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // The first register fixes the register class for the whole list.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing. Out-of-order GPR lists are
    // only warned about (hardware still encodes them); other classes are
    // a hard error.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else
        return Error(RegLoc, "register list not in ascending order");
    }
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}

// Helper function to parse the lane index for vector lists.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assembly puts one in, and it's friendly to accept that.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat the '#'

    // The lane index must be a constant expression.
    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().ParseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: a plain register with no lane specifier.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    // Bare register (gas extension): a D reg is a one-register list, a
    // Q reg a two-register list.
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Map the two D sub-registers onto the composite D-pair class.
        E = Parser.getTok().getLoc();
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0; // 0 = not yet known, 1 = single-spaced, 2 = double-spaced.
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present; it must match the first
      // register's lane specifier.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        // NOTE(review): the diagnostic text below carries a stray trailing
        // quote ("register')") — kept as-is to avoid changing output.
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  switch (LaneKind) {
  case NoLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }

    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    // Two-register operands have been converted to the
    // composite register classes.
    if (Count == 2) {
      const MCRegisterClass *RC = (Spacing == 1) ?
        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
    }
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef OptStr = Tok.getString();

  // Accept both the architectural names ("ish", "nsh", ...) and the older
  // aliases ("sh", "un", ...).
  // NOTE(review): OptStr.slice(0, OptStr.size()) is equivalent to OptStr
  // itself — presumably left over from an earlier transformation; confirm
  // before simplifying.
  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
    .Case("sy", ARM_MB::SY)
    .Case("st", ARM_MB::ST)
    .Case("sh", ARM_MB::ISH)
    .Case("ish", ARM_MB::ISH)
    .Case("shst", ARM_MB::ISHST)
    .Case("ishst", ARM_MB::ISHST)
    .Case("nsh", ARM_MB::NSH)
    .Case("un", ARM_MB::NSH)
    .Case("nshst", ARM_MB::NSHST)
    .Case("unst", ARM_MB::NSHST)
    .Case("osh", ARM_MB::OSH)
    .Case("oshst", ARM_MB::OSHST)
    .Default(~0U);

  if (Opt == ~0U)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef IFlagsStr = Tok.getString();

  // An iflags string of "none" is interpreted to mean that none of the AIF
  // bits are set. Not a terribly useful instruction, but a valid encoding.
3280 unsigned IFlags = 0; 3281 if (IFlagsStr != "none") { 3282 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 3283 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 3284 .Case("a", ARM_PROC::A) 3285 .Case("i", ARM_PROC::I) 3286 .Case("f", ARM_PROC::F) 3287 .Default(~0U); 3288 3289 // If some specific iflag is already set, it means that some letter is 3290 // present more than once, this is not acceptable. 3291 if (Flag == ~0U || (IFlags & Flag)) 3292 return MatchOperand_NoMatch; 3293 3294 IFlags |= Flag; 3295 } 3296 } 3297 3298 Parser.Lex(); // Eat identifier token. 3299 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 3300 return MatchOperand_Success; 3301 } 3302 3303 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 3304 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3305 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3306 SMLoc S = Parser.getTok().getLoc(); 3307 const AsmToken &Tok = Parser.getTok(); 3308 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 3309 StringRef Mask = Tok.getString(); 3310 3311 if (isMClass()) { 3312 // See ARMv6-M 10.1.1 3313 std::string Name = Mask.lower(); 3314 unsigned FlagsVal = StringSwitch<unsigned>(Name) 3315 .Case("apsr", 0) 3316 .Case("iapsr", 1) 3317 .Case("eapsr", 2) 3318 .Case("xpsr", 3) 3319 .Case("ipsr", 5) 3320 .Case("epsr", 6) 3321 .Case("iepsr", 7) 3322 .Case("msp", 8) 3323 .Case("psp", 9) 3324 .Case("primask", 16) 3325 .Case("basepri", 17) 3326 .Case("basepri_max", 18) 3327 .Case("faultmask", 19) 3328 .Case("control", 20) 3329 .Default(~0U); 3330 3331 if (FlagsVal == ~0U) 3332 return MatchOperand_NoMatch; 3333 3334 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 3335 // basepri, basepri_max and faultmask only valid for V7m. 3336 return MatchOperand_NoMatch; 3337 3338 Parser.Lex(); // Eat identifier token. 
3339 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3340 return MatchOperand_Success; 3341 } 3342 3343 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 3344 size_t Start = 0, Next = Mask.find('_'); 3345 StringRef Flags = ""; 3346 std::string SpecReg = Mask.slice(Start, Next).lower(); 3347 if (Next != StringRef::npos) 3348 Flags = Mask.slice(Next+1, Mask.size()); 3349 3350 // FlagsVal contains the complete mask: 3351 // 3-0: Mask 3352 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3353 unsigned FlagsVal = 0; 3354 3355 if (SpecReg == "apsr") { 3356 FlagsVal = StringSwitch<unsigned>(Flags) 3357 .Case("nzcvq", 0x8) // same as CPSR_f 3358 .Case("g", 0x4) // same as CPSR_s 3359 .Case("nzcvqg", 0xc) // same as CPSR_fs 3360 .Default(~0U); 3361 3362 if (FlagsVal == ~0U) { 3363 if (!Flags.empty()) 3364 return MatchOperand_NoMatch; 3365 else 3366 FlagsVal = 8; // No flag 3367 } 3368 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 3369 // cpsr_all is an alias for cpsr_fc, as is plain cpsr. 3370 if (Flags == "all" || Flags == "") 3371 Flags = "fc"; 3372 for (int i = 0, e = Flags.size(); i != e; ++i) { 3373 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 3374 .Case("c", 1) 3375 .Case("x", 2) 3376 .Case("s", 4) 3377 .Case("f", 8) 3378 .Default(~0U); 3379 3380 // If some specific flag is already set, it means that some letter is 3381 // present more than once, this is not acceptable. 3382 if (FlagsVal == ~0U || (FlagsVal & Flag)) 3383 return MatchOperand_NoMatch; 3384 FlagsVal |= Flag; 3385 } 3386 } else // No match for special register. 3387 return MatchOperand_NoMatch; 3388 3389 // Special register without flags is NOT equivalent to "fc" flags. 3390 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 3391 // two lines would enable gas compatibility at the expense of breaking 3392 // round-tripping. 
3393 // 3394 // if (!FlagsVal) 3395 // FlagsVal = 0x9; 3396 3397 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 3398 if (SpecReg == "spsr") 3399 FlagsVal |= 16; 3400 3401 Parser.Lex(); // Eat identifier token. 3402 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 3403 return MatchOperand_Success; 3404 } 3405 3406 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3407 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 3408 int Low, int High) { 3409 const AsmToken &Tok = Parser.getTok(); 3410 if (Tok.isNot(AsmToken::Identifier)) { 3411 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3412 return MatchOperand_ParseFail; 3413 } 3414 StringRef ShiftName = Tok.getString(); 3415 std::string LowerOp = Op.lower(); 3416 std::string UpperOp = Op.upper(); 3417 if (ShiftName != LowerOp && ShiftName != UpperOp) { 3418 Error(Parser.getTok().getLoc(), Op + " operand expected."); 3419 return MatchOperand_ParseFail; 3420 } 3421 Parser.Lex(); // Eat shift type token. 3422 3423 // There must be a '#' and a shift amount. 3424 if (Parser.getTok().isNot(AsmToken::Hash) && 3425 Parser.getTok().isNot(AsmToken::Dollar)) { 3426 Error(Parser.getTok().getLoc(), "'#' expected"); 3427 return MatchOperand_ParseFail; 3428 } 3429 Parser.Lex(); // Eat hash token. 
3430 3431 const MCExpr *ShiftAmount; 3432 SMLoc Loc = Parser.getTok().getLoc(); 3433 if (getParser().ParseExpression(ShiftAmount)) { 3434 Error(Loc, "illegal expression"); 3435 return MatchOperand_ParseFail; 3436 } 3437 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3438 if (!CE) { 3439 Error(Loc, "constant expression expected"); 3440 return MatchOperand_ParseFail; 3441 } 3442 int Val = CE->getValue(); 3443 if (Val < Low || Val > High) { 3444 Error(Loc, "immediate value out of range"); 3445 return MatchOperand_ParseFail; 3446 } 3447 3448 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3449 3450 return MatchOperand_Success; 3451 } 3452 3453 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3454 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3455 const AsmToken &Tok = Parser.getTok(); 3456 SMLoc S = Tok.getLoc(); 3457 if (Tok.isNot(AsmToken::Identifier)) { 3458 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3459 return MatchOperand_ParseFail; 3460 } 3461 int Val = StringSwitch<int>(Tok.getString()) 3462 .Case("be", 1) 3463 .Case("le", 0) 3464 .Default(-1); 3465 Parser.Lex(); // Eat the token. 3466 3467 if (Val == -1) { 3468 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3469 return MatchOperand_ParseFail; 3470 } 3471 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3472 getContext()), 3473 S, Parser.getTok().getLoc())); 3474 return MatchOperand_Success; 3475 } 3476 3477 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3478 /// instructions. Legal values are: 3479 /// lsl #n 'n' in [0,31] 3480 /// asr #n 'n' in [1,32] 3481 /// n == 32 encoded as n == 0. 
3482 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3483 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3484 const AsmToken &Tok = Parser.getTok(); 3485 SMLoc S = Tok.getLoc(); 3486 if (Tok.isNot(AsmToken::Identifier)) { 3487 Error(S, "shift operator 'asr' or 'lsl' expected"); 3488 return MatchOperand_ParseFail; 3489 } 3490 StringRef ShiftName = Tok.getString(); 3491 bool isASR; 3492 if (ShiftName == "lsl" || ShiftName == "LSL") 3493 isASR = false; 3494 else if (ShiftName == "asr" || ShiftName == "ASR") 3495 isASR = true; 3496 else { 3497 Error(S, "shift operator 'asr' or 'lsl' expected"); 3498 return MatchOperand_ParseFail; 3499 } 3500 Parser.Lex(); // Eat the operator. 3501 3502 // A '#' and a shift amount. 3503 if (Parser.getTok().isNot(AsmToken::Hash) && 3504 Parser.getTok().isNot(AsmToken::Dollar)) { 3505 Error(Parser.getTok().getLoc(), "'#' expected"); 3506 return MatchOperand_ParseFail; 3507 } 3508 Parser.Lex(); // Eat hash token. 3509 3510 const MCExpr *ShiftAmount; 3511 SMLoc E = Parser.getTok().getLoc(); 3512 if (getParser().ParseExpression(ShiftAmount)) { 3513 Error(E, "malformed shift expression"); 3514 return MatchOperand_ParseFail; 3515 } 3516 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3517 if (!CE) { 3518 Error(E, "shift amount must be an immediate"); 3519 return MatchOperand_ParseFail; 3520 } 3521 3522 int64_t Val = CE->getValue(); 3523 if (isASR) { 3524 // Shift amount must be in [1,32] 3525 if (Val < 1 || Val > 32) { 3526 Error(E, "'asr' shift amount must be in range [1,32]"); 3527 return MatchOperand_ParseFail; 3528 } 3529 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3530 if (isThumb() && Val == 32) { 3531 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3532 return MatchOperand_ParseFail; 3533 } 3534 if (Val == 32) Val = 0; 3535 } else { 3536 // Shift amount must be in [1,32] 3537 if (Val < 0 || Val > 31) { 3538 Error(E, "'lsr' shift amount must be in range [0,31]"); 3539 return MatchOperand_ParseFail; 3540 } 3541 } 3542 3543 E = Parser.getTok().getLoc(); 3544 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3545 3546 return MatchOperand_Success; 3547 } 3548 3549 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3550 /// of instructions. Legal values are: 3551 /// ror #n 'n' in {0, 8, 16, 24} 3552 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3553 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3554 const AsmToken &Tok = Parser.getTok(); 3555 SMLoc S = Tok.getLoc(); 3556 if (Tok.isNot(AsmToken::Identifier)) 3557 return MatchOperand_NoMatch; 3558 StringRef ShiftName = Tok.getString(); 3559 if (ShiftName != "ror" && ShiftName != "ROR") 3560 return MatchOperand_NoMatch; 3561 Parser.Lex(); // Eat the operator. 3562 3563 // A '#' and a rotate amount. 3564 if (Parser.getTok().isNot(AsmToken::Hash) && 3565 Parser.getTok().isNot(AsmToken::Dollar)) { 3566 Error(Parser.getTok().getLoc(), "'#' expected"); 3567 return MatchOperand_ParseFail; 3568 } 3569 Parser.Lex(); // Eat hash token. 
  // The rotate amount must fold to a constant.
  const MCExpr *ShiftAmount;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(ShiftAmount)) {
    Error(E, "malformed rotate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(E, "rotate amount must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Val = CE->getValue();
  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
  // normally, zero is represented in asm by omitting the rotate operand
  // entirely.
  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
    Error(E, "'ror' rotate amount must be 8, 16, or 24");
    return MatchOperand_ParseFail;
  }

  E = Parser.getTok().getLoc();
  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));

  return MatchOperand_Success;
}

/// parseBitfield - Parse the '#lsb, #width' descriptor for BFC/BFI/SBFX/
/// UBFX-style instructions and push a single Bitfield operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.
  // First operand: the LSB, a constant in [0,31].
  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the comma.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.
3641 3642 const MCExpr *WidthExpr; 3643 if (getParser().ParseExpression(WidthExpr)) { 3644 Error(E, "malformed immediate expression"); 3645 return MatchOperand_ParseFail; 3646 } 3647 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3648 if (!CE) { 3649 Error(E, "'width' operand must be an immediate"); 3650 return MatchOperand_ParseFail; 3651 } 3652 3653 int64_t Width = CE->getValue(); 3654 // The LSB must be in the range [1,32-lsb] 3655 if (Width < 1 || Width > 32 - LSB) { 3656 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3657 return MatchOperand_ParseFail; 3658 } 3659 E = Parser.getTok().getLoc(); 3660 3661 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3662 3663 return MatchOperand_Success; 3664 } 3665 3666 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3667 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3668 // Check for a post-index addressing register operand. Specifically: 3669 // postidx_reg := '+' register {, shift} 3670 // | '-' register {, shift} 3671 // | register {, shift} 3672 3673 // This method must return MatchOperand_NoMatch without consuming any tokens 3674 // in the case where there is no match, as other alternatives take other 3675 // parse methods. 3676 AsmToken Tok = Parser.getTok(); 3677 SMLoc S = Tok.getLoc(); 3678 bool haveEaten = false; 3679 bool isAdd = true; 3680 int Reg = -1; 3681 if (Tok.is(AsmToken::Plus)) { 3682 Parser.Lex(); // Eat the '+' token. 3683 haveEaten = true; 3684 } else if (Tok.is(AsmToken::Minus)) { 3685 Parser.Lex(); // Eat the '-' token. 
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // Only an error if we already consumed a '+'/'-'; otherwise let other
    // alternatives try to match from the original position.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  // An optional shift may follow the register.
  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
      return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
                                                  ShiftImm, S, E));

  return MatchOperand_Success;
}

ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  //  am3offset := '+' register
  //             | '-' register
  //             | register
  //             | # imm
  //             | # + imm
  //             | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // NOTE(review): this reuses the start token's location as the end
    // location — confirm whether the current token's loc was intended.
    SMLoc E = Tok.getLoc();
    // Negative zero is encoded as the flag value INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }


  bool haveEaten = false;
  bool isAdd = true;
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, E));

  return MatchOperand_Success;
}

/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr, then pred
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr, pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr (3 MI operands for addrmode2), then pred
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr, then pred
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr, pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr (3 MI operands), pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr (3 MI operands), pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (3 MI operands), then pred
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Returns true on success; false (after issuing a diagnostic) if the
/// destination register does not match either source register.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. (Three-operand form only: reject when Rd matches neither the
  // operand at index 4 nor the one at index 5.)
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // The second source is tied to the destination: reuse MI operand 0 (Rd).
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

/// cvtVLDwbFixed - Convert parsed VLD-with-writeback (fixed increment)
/// operands to MCInst.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVLDwbRegister - Convert parsed VLD-with-writeback (register increment)
/// operands to MCInst.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbFixed - Convert parsed VST-with-writeback (fixed increment)
/// operands to MCInst.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbRegister - Convert parsed VST-with-writeback (register increment)
/// operands to MCInst.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
/// or an error.
/// The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  if (Tok.is(AsmToken::RBrac)) {
    // "[Rn]" — base register only, no offset.
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Translate the alignment in bits to bytes for the operand.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// return true if it parses a shift otherwise it returns false.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  // "asl" is accepted as a synonym for "lsl".
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular ParseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real)) {
    // NOTE(review): the literal is always parsed as IEEE single precision
    // here (sign bit at position 31), while the integer path below encodes
    // an IEEE double bit pattern — confirm the is*() predicates expect this
    // asymmetry for .f64.
    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the VFP encoded 8-bit immediate back into a double, then store
    // its bit pattern in the operand.
    double RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::Create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse a arm instruction operand.  For now this parses the operand regardless
/// of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // tryParseRegisterWithWriteBack returns false on success.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    // If this is VMRS, check for the apsr_nzcv operand.
    if (Mnemonic == "vmrs" &&
        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
      return false;
    }

    // Fall through for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (CE) {
      // Negative zero ("#-0") is encoded as the flag value INT32_MIN.
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    }
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
//  :lower16: and :upper16:.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  // Map the recognized prefix names to their relocation variant kinds.
  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  // The prefix must be terminated by a second ':'.
  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  // Defaults: always-execute, no flag setting, no interrupt-mode suffix.
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Try to interpret the last two characters as a condition code. For a
    // mnemonic shorter than two characters substr() clamps and yields an
    // empty string, which simply fails to match any case.
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      // Strip the recognized condition code off the mnemonic.
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code ("ie"/"id" -> interrupt enable/disable).
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                      bool &CanAcceptPredicationCode) {
  // Mnemonics that can take an 's' suffix (i.e. have a cc_out operand).
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  // Mnemonics that are never predicable.
  // NOTE(review): "setend" is listed twice in this condition; the duplicate
  // is harmless but redundant.
  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
      (Mnemonic == "clrex" && !isThumb()) ||
      (Mnemonic == "nop" && isThumbOne()) ||
      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
       !isThumb()) ||
      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
    CanAcceptPredicationCode = false;
  } else
    CanAcceptPredicationCode = true;

  // A few coprocessor/breakpoint instructions are additionally unpredicable
  // in Thumb mode only.
  if (isThumb()) {
    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
      CanAcceptPredicationCode = false;
  }
}

// shouldOmitCCOutOperand - Decide, from the parsed operand list, whether the
// default (non-flag-setting) cc_out operand that ParseInstruction added at
// Operands[1] should be removed so matching selects an encoding that has no
// cc_out at all. Returns true when the operand should be removed.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                  SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

// isDataTypeToken - Return true if the token is one of the recognized NEON
// data-type suffixes (e.g. ".i32", ".f64").
static bool isDataTypeToken(StringRef Tok) {
  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
    Tok == ".f" || Tok == ".d";
}

// FIXME: This bit should probably be handled via an explicit match class
// in the .td files that matches the suffix instead of having it be
// a literal string token the way it is now.
// doesIgnoreDataTypeSuffix - Return true if the mnemonic completely ignores
// any NEON data-type suffix. DT is currently unused; only the mnemonic is
// consulted.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffices and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  unsigned AvailableFeatures = getAvailableFeatures();
  applyMnemonicAliases(Name, AvailableFeatures);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Loc points just past the two-character "it" mnemonic.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask right-to-left: start with the terminating '1' bit and
    // shift it down once per condition letter, setting bit 3 for 't'.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code suffix sits after the (possibly stripped) 's'.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here. Take care not to convert those
  // that should hit the Thumb2 encoding.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0 &&
        (isThumbOne() ||
         // The cc_out operand matches the IT block.
         ((inITBlock() != CarrySetting) &&
         // Neither register operand is a high register.
          (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
           isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
// listContainsReg - Return true if Reg appears among the register operands
// of Inst starting at operand index OpNo.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}

// validateInstruction - Check context-sensitive constraints (IT block state,
// register-pair sequencing, register-list legality, ...) that the table
// driven matcher can't express. Returns true (and emits a diagnostic) on
// failure.
// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: BKPT instruction has the interesting property of being
  // allowed in IT blocks, but not being predicable. It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
      Inst.getOpcode() != ARM::BKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      // Extract this instruction's then/else bit from the IT mask.
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. Note the source registers start at operand 1
    // for these forms (operand 0 is the writeback/status register).
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}

static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5316 case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8; 5317 case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16; 5318 case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32; 5319 5320 // VST2LN 5321 case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 5322 case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 5323 case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 5324 case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 5325 case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 5326 5327 case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD; 5328 case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD; 5329 case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD; 5330 case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD; 5331 case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD; 5332 5333 case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8; 5334 case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16; 5335 case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32; 5336 case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16; 5337 case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32; 5338 5339 // VST3LN 5340 case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 5341 case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 5342 case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD; 5343 case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD; 5344 case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 5345 case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD; 5346 case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD; 5347 case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return 
ARM::VST3LNd32_UPD; 5348 case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD; 5349 case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD; 5350 case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8; 5351 case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16; 5352 case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32; 5353 case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16; 5354 case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32; 5355 5356 // VST3 5357 case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 5358 case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 5359 case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 5360 case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 5361 case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 5362 case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 5363 case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD; 5364 case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD; 5365 case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD; 5366 case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD; 5367 case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD; 5368 case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD; 5369 case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8; 5370 case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16; 5371 case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32; 5372 case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8; 5373 case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16; 5374 case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32; 5375 5376 // VST4LN 5377 case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 5378 case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 5379 case 
ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 5380 case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD; 5381 case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 5382 case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD; 5383 case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD; 5384 case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD; 5385 case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD; 5386 case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD; 5387 case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8; 5388 case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16; 5389 case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32; 5390 case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16; 5391 case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32; 5392 5393 // VST4 5394 case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 5395 case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 5396 case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 5397 case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 5398 case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 5399 case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 5400 case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD; 5401 case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD; 5402 case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD; 5403 case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD; 5404 case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD; 5405 case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD; 5406 case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8; 5407 case ARM::VST4dAsm_16: Spacing = 1; return 
ARM::VST4d16; 5408 case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32; 5409 case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8; 5410 case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16; 5411 case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32; 5412 } 5413 } 5414 5415 static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) { 5416 switch(Opc) { 5417 default: llvm_unreachable("unexpected opcode!"); 5418 // VLD1LN 5419 case ARM::VLD1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 5420 case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 5421 case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 5422 case ARM::VLD1LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD1LNd8_UPD; 5423 case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD; 5424 case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD; 5425 case ARM::VLD1LNdAsm_8: Spacing = 1; return ARM::VLD1LNd8; 5426 case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16; 5427 case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32; 5428 5429 // VLD2LN 5430 case ARM::VLD2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 5431 case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 5432 case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 5433 case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD; 5434 case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 5435 case ARM::VLD2LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD2LNd8_UPD; 5436 case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD; 5437 case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD; 5438 case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD; 5439 case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD; 5440 case ARM::VLD2LNdAsm_8: Spacing = 1; return ARM::VLD2LNd8; 
5441 case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16; 5442 case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32; 5443 case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16; 5444 case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32; 5445 5446 // VLD3DUP 5447 case ARM::VLD3DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 5448 case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 5449 case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 5450 case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD; 5451 case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD; 5452 case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 5453 case ARM::VLD3DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD3DUPd8_UPD; 5454 case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD; 5455 case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD; 5456 case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD; 5457 case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD; 5458 case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD; 5459 case ARM::VLD3DUPdAsm_8: Spacing = 1; return ARM::VLD3DUPd8; 5460 case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16; 5461 case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32; 5462 case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8; 5463 case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16; 5464 case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32; 5465 5466 // VLD3LN 5467 case ARM::VLD3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 5468 case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 5469 case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 5470 case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return 
ARM::VLD3LNq16_UPD; 5471 case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 5472 case ARM::VLD3LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD3LNd8_UPD; 5473 case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD; 5474 case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD; 5475 case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD; 5476 case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD; 5477 case ARM::VLD3LNdAsm_8: Spacing = 1; return ARM::VLD3LNd8; 5478 case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16; 5479 case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32; 5480 case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16; 5481 case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32; 5482 5483 // VLD3 5484 case ARM::VLD3dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 5485 case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 5486 case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 5487 case ARM::VLD3qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; 5488 case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 5489 case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 5490 case ARM::VLD3dWB_register_Asm_8: Spacing = 1; return ARM::VLD3d8_UPD; 5491 case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD; 5492 case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD; 5493 case ARM::VLD3qWB_register_Asm_8: Spacing = 2; return ARM::VLD3q8_UPD; 5494 case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD; 5495 case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD; 5496 case ARM::VLD3dAsm_8: Spacing = 1; return ARM::VLD3d8; 5497 case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16; 5498 case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32; 5499 case ARM::VLD3qAsm_8: Spacing = 2; return 
ARM::VLD3q8; 5500 case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16; 5501 case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32; 5502 5503 // VLD4LN 5504 case ARM::VLD4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 5505 case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 5506 case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 5507 case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNq16_UPD; 5508 case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 5509 case ARM::VLD4LNdWB_register_Asm_8: Spacing = 1; return ARM::VLD4LNd8_UPD; 5510 case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD; 5511 case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD; 5512 case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD; 5513 case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD; 5514 case ARM::VLD4LNdAsm_8: Spacing = 1; return ARM::VLD4LNd8; 5515 case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16; 5516 case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32; 5517 case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16; 5518 case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32; 5519 5520 // VLD4DUP 5521 case ARM::VLD4DUPdWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 5522 case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 5523 case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD; 5524 case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4DUPq8_UPD; 5525 case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD; 5526 case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 5527 case ARM::VLD4DUPdWB_register_Asm_8: Spacing = 1; return ARM::VLD4DUPd8_UPD; 5528 case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD; 5529 case ARM::VLD4DUPdWB_register_Asm_32: 
Spacing = 1; return ARM::VLD4DUPd32_UPD; 5530 case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD; 5531 case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD; 5532 case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD; 5533 case ARM::VLD4DUPdAsm_8: Spacing = 1; return ARM::VLD4DUPd8; 5534 case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16; 5535 case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32; 5536 case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8; 5537 case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16; 5538 case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32; 5539 5540 // VLD4 5541 case ARM::VLD4dWB_fixed_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 5542 case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 5543 case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 5544 case ARM::VLD4qWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 5545 case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 5546 case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 5547 case ARM::VLD4dWB_register_Asm_8: Spacing = 1; return ARM::VLD4d8_UPD; 5548 case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD; 5549 case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD; 5550 case ARM::VLD4qWB_register_Asm_8: Spacing = 2; return ARM::VLD4q8_UPD; 5551 case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD; 5552 case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD; 5553 case ARM::VLD4dAsm_8: Spacing = 1; return ARM::VLD4d8; 5554 case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16; 5555 case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32; 5556 case ARM::VLD4qAsm_8: Spacing = 2; return ARM::VLD4q8; 5557 case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16; 5558 case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32; 5559 } 
5560 } 5561 5562 bool ARMAsmParser:: 5563 processInstruction(MCInst &Inst, 5564 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 5565 switch (Inst.getOpcode()) { 5566 // Aliases for alternate PC+imm syntax of LDR instructions. 5567 case ARM::t2LDRpcrel: 5568 Inst.setOpcode(ARM::t2LDRpci); 5569 return true; 5570 case ARM::t2LDRBpcrel: 5571 Inst.setOpcode(ARM::t2LDRBpci); 5572 return true; 5573 case ARM::t2LDRHpcrel: 5574 Inst.setOpcode(ARM::t2LDRHpci); 5575 return true; 5576 case ARM::t2LDRSBpcrel: 5577 Inst.setOpcode(ARM::t2LDRSBpci); 5578 return true; 5579 case ARM::t2LDRSHpcrel: 5580 Inst.setOpcode(ARM::t2LDRSHpci); 5581 return true; 5582 // Handle NEON VST complex aliases. 5583 case ARM::VST1LNdWB_register_Asm_8: 5584 case ARM::VST1LNdWB_register_Asm_16: 5585 case ARM::VST1LNdWB_register_Asm_32: { 5586 MCInst TmpInst; 5587 // Shuffle the operands around so the lane index operand is in the 5588 // right place. 5589 unsigned Spacing; 5590 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5591 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5592 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5593 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5594 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5595 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5596 TmpInst.addOperand(Inst.getOperand(1)); // lane 5597 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5598 TmpInst.addOperand(Inst.getOperand(6)); 5599 Inst = TmpInst; 5600 return true; 5601 } 5602 5603 case ARM::VST2LNdWB_register_Asm_8: 5604 case ARM::VST2LNdWB_register_Asm_16: 5605 case ARM::VST2LNdWB_register_Asm_32: 5606 case ARM::VST2LNqWB_register_Asm_16: 5607 case ARM::VST2LNqWB_register_Asm_32: { 5608 MCInst TmpInst; 5609 // Shuffle the operands around so the lane index operand is in the 5610 // right place. 
5611 unsigned Spacing; 5612 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5613 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5614 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5615 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5616 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5617 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5618 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5619 Spacing)); 5620 TmpInst.addOperand(Inst.getOperand(1)); // lane 5621 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5622 TmpInst.addOperand(Inst.getOperand(6)); 5623 Inst = TmpInst; 5624 return true; 5625 } 5626 5627 case ARM::VST3LNdWB_register_Asm_8: 5628 case ARM::VST3LNdWB_register_Asm_16: 5629 case ARM::VST3LNdWB_register_Asm_32: 5630 case ARM::VST3LNqWB_register_Asm_16: 5631 case ARM::VST3LNqWB_register_Asm_32: { 5632 MCInst TmpInst; 5633 // Shuffle the operands around so the lane index operand is in the 5634 // right place. 5635 unsigned Spacing; 5636 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5637 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5638 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5639 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5640 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5641 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5642 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5643 Spacing)); 5644 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5645 Spacing * 2)); 5646 TmpInst.addOperand(Inst.getOperand(1)); // lane 5647 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5648 TmpInst.addOperand(Inst.getOperand(6)); 5649 Inst = TmpInst; 5650 return true; 5651 } 5652 5653 case ARM::VST4LNdWB_register_Asm_8: 5654 case ARM::VST4LNdWB_register_Asm_16: 5655 case ARM::VST4LNdWB_register_Asm_32: 5656 case ARM::VST4LNqWB_register_Asm_16: 5657 case ARM::VST4LNqWB_register_Asm_32: { 5658 MCInst TmpInst; 5659 // Shuffle the operands around so 
the lane index operand is in the 5660 // right place. 5661 unsigned Spacing; 5662 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5663 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5664 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5665 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5666 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5667 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5668 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5669 Spacing)); 5670 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5671 Spacing * 2)); 5672 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5673 Spacing * 3)); 5674 TmpInst.addOperand(Inst.getOperand(1)); // lane 5675 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5676 TmpInst.addOperand(Inst.getOperand(6)); 5677 Inst = TmpInst; 5678 return true; 5679 } 5680 5681 case ARM::VST1LNdWB_fixed_Asm_8: 5682 case ARM::VST1LNdWB_fixed_Asm_16: 5683 case ARM::VST1LNdWB_fixed_Asm_32: { 5684 MCInst TmpInst; 5685 // Shuffle the operands around so the lane index operand is in the 5686 // right place. 
5687 unsigned Spacing; 5688 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5689 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5690 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5691 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5692 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5693 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5694 TmpInst.addOperand(Inst.getOperand(1)); // lane 5695 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5696 TmpInst.addOperand(Inst.getOperand(5)); 5697 Inst = TmpInst; 5698 return true; 5699 } 5700 5701 case ARM::VST2LNdWB_fixed_Asm_8: 5702 case ARM::VST2LNdWB_fixed_Asm_16: 5703 case ARM::VST2LNdWB_fixed_Asm_32: 5704 case ARM::VST2LNqWB_fixed_Asm_16: 5705 case ARM::VST2LNqWB_fixed_Asm_32: { 5706 MCInst TmpInst; 5707 // Shuffle the operands around so the lane index operand is in the 5708 // right place. 5709 unsigned Spacing; 5710 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5711 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5712 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5713 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5714 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5715 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5716 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5717 Spacing)); 5718 TmpInst.addOperand(Inst.getOperand(1)); // lane 5719 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5720 TmpInst.addOperand(Inst.getOperand(5)); 5721 Inst = TmpInst; 5722 return true; 5723 } 5724 5725 case ARM::VST3LNdWB_fixed_Asm_8: 5726 case ARM::VST3LNdWB_fixed_Asm_16: 5727 case ARM::VST3LNdWB_fixed_Asm_32: 5728 case ARM::VST3LNqWB_fixed_Asm_16: 5729 case ARM::VST3LNqWB_fixed_Asm_32: { 5730 MCInst TmpInst; 5731 // Shuffle the operands around so the lane index operand is in the 5732 // right place. 
5733 unsigned Spacing; 5734 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5735 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5736 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5737 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5738 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5739 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5740 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5741 Spacing)); 5742 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5743 Spacing * 2)); 5744 TmpInst.addOperand(Inst.getOperand(1)); // lane 5745 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5746 TmpInst.addOperand(Inst.getOperand(5)); 5747 Inst = TmpInst; 5748 return true; 5749 } 5750 5751 case ARM::VST4LNdWB_fixed_Asm_8: 5752 case ARM::VST4LNdWB_fixed_Asm_16: 5753 case ARM::VST4LNdWB_fixed_Asm_32: 5754 case ARM::VST4LNqWB_fixed_Asm_16: 5755 case ARM::VST4LNqWB_fixed_Asm_32: { 5756 MCInst TmpInst; 5757 // Shuffle the operands around so the lane index operand is in the 5758 // right place. 
5759 unsigned Spacing; 5760 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5761 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5762 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5763 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5764 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5765 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5766 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5767 Spacing)); 5768 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5769 Spacing * 2)); 5770 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5771 Spacing * 3)); 5772 TmpInst.addOperand(Inst.getOperand(1)); // lane 5773 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5774 TmpInst.addOperand(Inst.getOperand(5)); 5775 Inst = TmpInst; 5776 return true; 5777 } 5778 5779 case ARM::VST1LNdAsm_8: 5780 case ARM::VST1LNdAsm_16: 5781 case ARM::VST1LNdAsm_32: { 5782 MCInst TmpInst; 5783 // Shuffle the operands around so the lane index operand is in the 5784 // right place. 5785 unsigned Spacing; 5786 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5787 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5788 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5789 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5790 TmpInst.addOperand(Inst.getOperand(1)); // lane 5791 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5792 TmpInst.addOperand(Inst.getOperand(5)); 5793 Inst = TmpInst; 5794 return true; 5795 } 5796 5797 case ARM::VST2LNdAsm_8: 5798 case ARM::VST2LNdAsm_16: 5799 case ARM::VST2LNdAsm_32: 5800 case ARM::VST2LNqAsm_16: 5801 case ARM::VST2LNqAsm_32: { 5802 MCInst TmpInst; 5803 // Shuffle the operands around so the lane index operand is in the 5804 // right place. 
5805 unsigned Spacing; 5806 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5807 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5808 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5809 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5810 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5811 Spacing)); 5812 TmpInst.addOperand(Inst.getOperand(1)); // lane 5813 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5814 TmpInst.addOperand(Inst.getOperand(5)); 5815 Inst = TmpInst; 5816 return true; 5817 } 5818 5819 case ARM::VST3LNdAsm_8: 5820 case ARM::VST3LNdAsm_16: 5821 case ARM::VST3LNdAsm_32: 5822 case ARM::VST3LNqAsm_16: 5823 case ARM::VST3LNqAsm_32: { 5824 MCInst TmpInst; 5825 // Shuffle the operands around so the lane index operand is in the 5826 // right place. 5827 unsigned Spacing; 5828 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 5829 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5830 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5831 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5832 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5833 Spacing)); 5834 TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() + 5835 Spacing * 2)); 5836 TmpInst.addOperand(Inst.getOperand(1)); // lane 5837 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5838 TmpInst.addOperand(Inst.getOperand(5)); 5839 Inst = TmpInst; 5840 return true; 5841 } 5842 5843 case ARM::VST4LNdAsm_8: 5844 case ARM::VST4LNdAsm_16: 5845 case ARM::VST4LNdAsm_32: 5846 case ARM::VST4LNqAsm_16: 5847 case ARM::VST4LNqAsm_32: { 5848 MCInst TmpInst; 5849 // Shuffle the operands around so the lane index operand is in the 5850 // right place. 
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Remaining registers of the store list, at +Spacing increments from Vd.
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Handle NEON VLD complex aliases.
  // The lane-indexed Asm pseudos carry (Vd, lane, Rn, alignment, Rm, pred).
  // The real writeback instruction wants the writeback result register, the
  // full register list (built from Vd at +Spacing increments, as reported by
  // getRealVLDOpcode), and a tied source copy of the destination list.
  case ARM::VLD1LNdWB_register_Asm_8:
  case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_register_Asm_8:
  case ARM::VLD2LNdWB_register_Asm_16:
  case ARM::VLD2LNdWB_register_Asm_32:
  case ARM::VLD2LNqWB_register_Asm_16:
  case ARM::VLD2LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_register_Asm_8:
  case ARM::VLD3LNdWB_register_Asm_16:
  case ARM::VLD3LNdWB_register_Asm_32:
  case ARM::VLD3LNqWB_register_Asm_16:
  case ARM::VLD3LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_register_Asm_8:
  case ARM::VLD4LNdWB_register_Asm_16:
  case ARM::VLD4LNdWB_register_Asm_32:
  case ARM::VLD4LNqWB_register_Asm_16:
  case ARM::VLD4LNqWB_register_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  // Fixed-increment writeback forms: the Asm pseudo has no Rm operand, so
  // the pseudo's operand list is (Vd, lane, Rn, alignment, pred).
  case ARM::VLD1LNdWB_fixed_Asm_8:
  case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    // Fixed-writeback form: no Rm register in the asm syntax, so encode
    // register 0 in the Rm slot.
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8:
  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_16:
  case ARM::VLD2LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdWB_fixed_Asm_8:
  case ARM::VLD3LNdWB_fixed_Asm_16:
  case ARM::VLD3LNdWB_fixed_Asm_32:
  case ARM::VLD3LNqWB_fixed_Asm_16:
  case ARM::VLD3LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdWB_fixed_Asm_8:
  case ARM::VLD4LNdWB_fixed_Asm_16:
  case ARM::VLD4LNdWB_fixed_Asm_32:
  case ARM::VLD4LNqWB_fixed_Asm_16:
  case ARM::VLD4LNqWB_fixed_Asm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Non-writeback lane-indexed load forms: pseudo operands are
  // (Vd, lane, Rn, alignment, pred).
  case ARM::VLD1LNdAsm_8:
  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8:
  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_32:
  case ARM::VLD2LNqAsm_16:
  case ARM::VLD2LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    // Second register of the list, Spacing apart (single- vs. double-spaced
    // as reported by getRealVLDOpcode).
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3LNdAsm_8:
  case ARM::VLD3LNdAsm_16:
  case ARM::VLD3LNdAsm_32:
  case ARM::VLD3LNqAsm_16:
  case ARM::VLD3LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4LNdAsm_8:
  case ARM::VLD4LNdAsm_16:
  case ARM::VLD4LNdAsm_32:
  case ARM::VLD4LNqAsm_16:
  case ARM::VLD4LNqAsm_32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // VLD3DUP single 3-element structure to all lanes instructions.
  // Pseudo operands are (Vd, Rn, alignment, pred); the real instruction
  // wants the full 3-register list built from Vd at +Spacing increments.
  case ARM::VLD3DUPdAsm_8:
  case ARM::VLD3DUPdAsm_16:
  case ARM::VLD3DUPdAsm_32:
  case ARM::VLD3DUPqAsm_8:
  case ARM::VLD3DUPqAsm_16:
  case ARM::VLD3DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3DUPdWB_fixed_Asm_8:
  case ARM::VLD3DUPdWB_fixed_Asm_16:
  case ARM::VLD3DUPdWB_fixed_Asm_32:
  case ARM::VLD3DUPqWB_fixed_Asm_8:
  case ARM::VLD3DUPqWB_fixed_Asm_16:
  case ARM::VLD3DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    // Fixed-writeback form: no Rm register, encode reg 0 in the Rm slot.
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3DUPdWB_register_Asm_8:
  case ARM::VLD3DUPdWB_register_Asm_16:
  case ARM::VLD3DUPdWB_register_Asm_32:
  case ARM::VLD3DUPqWB_register_Asm_8:
  case ARM::VLD3DUPqWB_register_Asm_16:
  case ARM::VLD3DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // VLD3 multiple 3-element structure instructions.
  case ARM::VLD3dAsm_8:
  case ARM::VLD3dAsm_16:
  case ARM::VLD3dAsm_32:
  case ARM::VLD3qAsm_8:
  case ARM::VLD3qAsm_16:
  case ARM::VLD3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3dWB_fixed_Asm_8:
  case ARM::VLD3dWB_fixed_Asm_16:
  case ARM::VLD3dWB_fixed_Asm_32:
  case ARM::VLD3qWB_fixed_Asm_8:
  case ARM::VLD3qWB_fixed_Asm_16:
  case ARM::VLD3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD3dWB_register_Asm_8:
  case ARM::VLD3dWB_register_Asm_16:
  case ARM::VLD3dWB_register_Asm_32:
  case ARM::VLD3qWB_register_Asm_8:
  case ARM::VLD3qWB_register_Asm_16:
  case ARM::VLD3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // VLD4DUP single 4-element structure to all lanes instructions.
  // Pseudo operands are (Vd, Rn, alignment, pred); the real instruction
  // wants the full 4-register list built from Vd at +Spacing increments.
  case ARM::VLD4DUPdAsm_8:
  case ARM::VLD4DUPdAsm_16:
  case ARM::VLD4DUPdAsm_32:
  case ARM::VLD4DUPqAsm_8:
  case ARM::VLD4DUPqAsm_16:
  case ARM::VLD4DUPqAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4DUPdWB_fixed_Asm_8:
  case ARM::VLD4DUPdWB_fixed_Asm_16:
  case ARM::VLD4DUPdWB_fixed_Asm_32:
  case ARM::VLD4DUPqWB_fixed_Asm_8:
  case ARM::VLD4DUPqWB_fixed_Asm_16:
  case ARM::VLD4DUPqWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    // Fixed-writeback form: no Rm register, encode reg 0 in the Rm slot.
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4DUPdWB_register_Asm_8:
  case ARM::VLD4DUPdWB_register_Asm_16:
  case ARM::VLD4DUPdWB_register_Asm_32:
  case ARM::VLD4DUPqWB_register_Asm_8:
  case ARM::VLD4DUPqWB_register_Asm_16:
  case ARM::VLD4DUPqWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // VLD4 multiple 4-element structure instructions.
  // Pseudo operands are (Vd, Rn, alignment, pred); expand the register list
  // to all four registers, Spacing apart starting at Vd.
  case ARM::VLD4dAsm_8:
  case ARM::VLD4dAsm_16:
  case ARM::VLD4dAsm_32:
  case ARM::VLD4qAsm_8:
  case ARM::VLD4qAsm_16:
  case ARM::VLD4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4dWB_fixed_Asm_8:
  case ARM::VLD4dWB_fixed_Asm_16:
  case ARM::VLD4dWB_fixed_Asm_32:
  case ARM::VLD4qWB_fixed_Asm_8:
  case ARM::VLD4qWB_fixed_Asm_16:
  case ARM::VLD4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    // Fixed-writeback form: no Rm register, encode reg 0 in the Rm slot.
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD4dWB_register_Asm_8:
  case ARM::VLD4dWB_register_Asm_16:
  case ARM::VLD4dWB_register_Asm_32:
  case ARM::VLD4qWB_register_Asm_8:
  case ARM::VLD4qWB_register_Asm_16:
  case ARM::VLD4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // VST3 multiple 3-element structure instructions.
  // Store pseudos: unlike the loads, the register list is a source, so it
  // goes after the address operands in the real instruction's operand list.
  case ARM::VST3dAsm_8:
  case ARM::VST3dAsm_16:
  case ARM::VST3dAsm_32:
  case ARM::VST3qAsm_8:
  case ARM::VST3qAsm_16:
  case ARM::VST3qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3dWB_fixed_Asm_8:
  case ARM::VST3dWB_fixed_Asm_16:
  case ARM::VST3dWB_fixed_Asm_32:
  case ARM::VST3qWB_fixed_Asm_8:
  case ARM::VST3qWB_fixed_Asm_16:
  case ARM::VST3qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    // Fixed-writeback form: no Rm register, encode reg 0 in the Rm slot.
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST3dWB_register_Asm_8:
  case ARM::VST3dWB_register_Asm_16:
  case ARM::VST3dWB_register_Asm_32:
  case ARM::VST3qWB_register_Asm_8:
  case ARM::VST3qWB_register_Asm_16:
  case ARM::VST3qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // VST4 multiple 4-element structure instructions.
  case ARM::VST4dAsm_8:
  case ARM::VST4dAsm_16:
  case ARM::VST4dAsm_32:
  case ARM::VST4qAsm_8:
  case ARM::VST4qAsm_16:
  case ARM::VST4qAsm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4dWB_fixed_Asm_8:
  case ARM::VST4dWB_fixed_Asm_16:
  case ARM::VST4dWB_fixed_Asm_32:
  case ARM::VST4qWB_fixed_Asm_8:
  case ARM::VST4qWB_fixed_Asm_16:
  case ARM::VST4qWB_fixed_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST4dWB_register_Asm_8:
  case ARM::VST4dWB_register_Asm_16:
  case ARM::VST4dWB_register_Asm_32:
  case ARM::VST4qWB_register_Asm_8:
  case ARM::VST4qWB_register_Asm_16:
  case ARM::VST4qWB_register_Asm_32: {
    MCInst TmpInst;
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
    TmpInst.addOperand(Inst.getOperand(2)); // alignment
    TmpInst.addOperand(Inst.getOperand(3)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 2));
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing * 3));
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  // Handle the Thumb2 mode MOV complex aliases.
6649 case ARM::t2MOVsr: 6650 case ARM::t2MOVSsr: { 6651 // Which instruction to expand to depends on the CCOut operand and 6652 // whether we're in an IT block if the register operands are low 6653 // registers. 6654 bool isNarrow = false; 6655 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6656 isARMLowRegister(Inst.getOperand(1).getReg()) && 6657 isARMLowRegister(Inst.getOperand(2).getReg()) && 6658 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 6659 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr)) 6660 isNarrow = true; 6661 MCInst TmpInst; 6662 unsigned newOpc; 6663 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { 6664 default: llvm_unreachable("unexpected opcode!"); 6665 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break; 6666 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break; 6667 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; 6668 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; 6669 } 6670 TmpInst.setOpcode(newOpc); 6671 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6672 if (isNarrow) 6673 TmpInst.addOperand(MCOperand::CreateReg( 6674 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 6675 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6676 TmpInst.addOperand(Inst.getOperand(2)); // Rm 6677 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 6678 TmpInst.addOperand(Inst.getOperand(5)); 6679 if (!isNarrow) 6680 TmpInst.addOperand(MCOperand::CreateReg( 6681 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 6682 Inst = TmpInst; 6683 return true; 6684 } 6685 case ARM::t2MOVsi: 6686 case ARM::t2MOVSsi: { 6687 // Which instruction to expand to depends on the CCOut operand and 6688 // whether we're in an IT block if the register operands are low 6689 // registers. 
6690 bool isNarrow = false; 6691 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 6692 isARMLowRegister(Inst.getOperand(1).getReg()) && 6693 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi)) 6694 isNarrow = true; 6695 MCInst TmpInst; 6696 unsigned newOpc; 6697 switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) { 6698 default: llvm_unreachable("unexpected opcode!"); 6699 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; 6700 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; 6701 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break; 6702 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; 6703 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; 6704 } 6705 unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); 6706 if (Ammount == 32) Ammount = 0; 6707 TmpInst.setOpcode(newOpc); 6708 TmpInst.addOperand(Inst.getOperand(0)); // Rd 6709 if (isNarrow) 6710 TmpInst.addOperand(MCOperand::CreateReg( 6711 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 6712 TmpInst.addOperand(Inst.getOperand(1)); // Rn 6713 if (newOpc != ARM::t2RRX) 6714 TmpInst.addOperand(MCOperand::CreateImm(Ammount)); 6715 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 6716 TmpInst.addOperand(Inst.getOperand(4)); 6717 if (!isNarrow) 6718 TmpInst.addOperand(MCOperand::CreateReg( 6719 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 6720 Inst = TmpInst; 6721 return true; 6722 } 6723 // Handle the ARM mode MOV complex aliases. 
  case ARM::ASRr:
  case ARM::LSRr:
  case ARM::LSLr:
  case ARM::RORr: {
    // Expand the register-shifted MOV aliases into a plain MOVsr with the
    // shift type folded into the shifter-operand immediate.
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
    }
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsr);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  case ARM::ASRi:
  case ARM::LSRi:
  case ARM::LSLi:
  case ARM::RORi: {
    // Expand the immediate-shift MOV aliases into MOVsi (or MOVr for a
    // zero shift amount).
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
    }
    // A shift by zero is a plain MOVr, not a MOVsi.
    unsigned Amt = Inst.getOperand(2).getImm();
    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
    MCInst TmpInst;
    TmpInst.setOpcode(Opc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    if (Opc == ARM::MOVsi)
      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  case ARM::RRXi: {
    // RRX is MOVsi with an rrx shifter operand (no shift amount).
    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsi);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
    Inst = TmpInst;
    return true;
  }
  case ARM::t2LDMIA_UPD: {
    // If this is a load of a single register, then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDR_POST);
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::t2STMDB_UPD: {
    // If this is a store of a single register, then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STR_PRE);
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(-4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::t2ADDri12:
    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
    // mnemonic was used (not "addw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2ADDri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::t2SUBri12:
    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
    // mnemonic was used (not "subw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2SUBri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tADDi3);
      return true;
    }
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified.
    // From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tSUBi3);
      return true;
    }
    break;
  case ARM::t2ADDri:
  case ARM::t2SUBri: {
    // If the destination and first source operand are the same, and
    // the flags are compatible with the current IT status, use encoding T2
    // instead of T3. For compatibility with the system 'as'. Make sure the
    // wide encoding wasn't explicit.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
        (unsigned)Inst.getOperand(2).getImm() > 255 ||
        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
         (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
      break;
    MCInst TmpInst;
    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
                      ARM::tADDi8 : ARM::tSUBi8);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(5));
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(2));
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  case ARM::t2ADDrr: {
    // If the destination and first source operand are the same, and
    // there's no setting of the flags, use encoding T2 instead of T3.
    // Note that this is only for ADD, not SUB. This mirrors the system
    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
        Inst.getOperand(5).getReg() != 0 ||
        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
      break;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::tADDhirr);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(2));
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
      Inst.setOpcode(ARM::tBcc);
      return true;
    }
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
      Inst.setOpcode(ARM::t2Bcc);
      return true;
    }
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
      Inst.setOpcode(ARM::t2B);
      return true;
    }
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
      Inst.setOpcode(ARM::tB);
      return true;
    }
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
      return true;
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
      return true;
    }
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2LDMIA_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2STMDB_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::MOVsi: {
    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
    if (SOpc == ARM_AM::rrx) return false;
    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
      // Shifting by zero is accepted as a vanilla 'MOVr'
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::MOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(5));
      Inst = TmpInst;
      return true;
    }
    return false;
  }
  case ARM::ANDrsi:
  case ARM::ORRrsi:
  case ARM::EORrsi:
  case ARM::BICrsi:
  case ARM::SUBrsi:
  case ARM::ADDrsi: {
    unsigned newOpc;
    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
    if (SOpc == ARM_AM::rrx) return false;
    switch (Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
    case ARM::EORrsi: newOpc = ARM::EORrr; break;
    case ARM::BICrsi: newOpc = ARM::BICrr; break;
    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
    }
    // If the shift is by zero, use the non-shifted instruction definition.
    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
      MCInst TmpInst;
      TmpInst.setOpcode(newOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(5));
      TmpInst.addOperand(Inst.getOperand(6));
      Inst = TmpInst;
      return true;
    }
    return false;
  }
  case ARM::ITasm:
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero. The encoding also expects the low
    // bit of the condition to be encoded as bit 4 of the mask operand,
    // so mask that in if needed
    MCOperand &MO = Inst.getOperand(1);
    unsigned Mask = MO.getImm();
    unsigned OrigMask = Mask;
    unsigned TZ = CountTrailingZeros_32(Mask);
    if ((Inst.getOperand(0).getImm() & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      for (unsigned i = 3; i != TZ; --i)
        Mask ^= 1 << i;
    } else
      Mask |= 0x10;
    MO.setImm(Mask);

    // Set up the IT block state according to the IT instruction we just
    // matched.
    assert(!inITBlock() && "nested IT blocks?!");
    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
    ITState.CurPosition = 0;
    ITState.FirstCond = true;
    break;
  }
  }
  // No transformation applied; the caller stops iterating.
  return false;
}

unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
7193 unsigned Opc = Inst.getOpcode(); 7194 const MCInstrDesc &MCID = getInstDesc(Opc); 7195 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 7196 assert(MCID.hasOptionalDef() && 7197 "optionally flag setting instruction missing optional def operand"); 7198 assert(MCID.NumOperands == Inst.getNumOperands() && 7199 "operand count mismatch!"); 7200 // Find the optional-def operand (cc_out). 7201 unsigned OpNo; 7202 for (OpNo = 0; 7203 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 7204 ++OpNo) 7205 ; 7206 // If we're parsing Thumb1, reject it completely. 7207 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 7208 return Match_MnemonicFail; 7209 // If we're parsing Thumb2, which form is legal depends on whether we're 7210 // in an IT block. 7211 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 7212 !inITBlock()) 7213 return Match_RequiresITBlock; 7214 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 7215 inITBlock()) 7216 return Match_RequiresNotITBlock; 7217 } 7218 // Some high-register supporting Thumb1 encodings only allow both registers 7219 // to be from r0-r7 when in Thumb2. 7220 else if (Opc == ARM::tADDhirr && isThumbOne() && 7221 isARMLowRegister(Inst.getOperand(1).getReg()) && 7222 isARMLowRegister(Inst.getOperand(2).getReg())) 7223 return Match_RequiresThumb2; 7224 // Others only require ARMv6 or later. 
7225 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 7226 isARMLowRegister(Inst.getOperand(0).getReg()) && 7227 isARMLowRegister(Inst.getOperand(1).getReg())) 7228 return Match_RequiresV6; 7229 return Match_Success; 7230 } 7231 7232 bool ARMAsmParser:: 7233 MatchAndEmitInstruction(SMLoc IDLoc, 7234 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 7235 MCStreamer &Out) { 7236 MCInst Inst; 7237 unsigned ErrorInfo; 7238 unsigned MatchResult; 7239 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 7240 switch (MatchResult) { 7241 default: break; 7242 case Match_Success: 7243 // Context sensitive operand constraints aren't handled by the matcher, 7244 // so check them here. 7245 if (validateInstruction(Inst, Operands)) { 7246 // Still progress the IT block, otherwise one wrong condition causes 7247 // nasty cascading errors. 7248 forwardITPosition(); 7249 return true; 7250 } 7251 7252 // Some instructions need post-processing to, for example, tweak which 7253 // encoding is selected. Loop on it while changes happen so the 7254 // individual transformations can chain off each other. E.g., 7255 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 7256 while (processInstruction(Inst, Operands)) 7257 ; 7258 7259 // Only move forward at the very end so that everything in validate 7260 // and process gets a consistent answer about whether we're in an IT 7261 // block. 7262 forwardITPosition(); 7263 7264 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and 7265 // doesn't actually encode. 
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    return parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    return parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".arch")
    return parseDirectiveArch(DirectiveID.getLoc());
  else if (IDVal == ".eabi_attribute")
    return parseDirectiveEabiAttr(DirectiveID.getLoc());
  // Returning true means the directive was not handled here.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  if (!isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  return false;
}

/// parseDirectiveARM
///  ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  if (isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;
  bool needFuncName = true;

  // Darwin asm has (optionally) function name after .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::EndOfStatement)) {
      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
        return Error(L, "unexpected token in .thumb_func directive");
      Name = Tok.getIdentifier();
      Parser.Lex(); // Consume the identifier token.
      needFuncName = false;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");

  // Eat the end of statement and any blank lines that follow.
  while (getLexer().is(AsmToken::EndOfStatement))
    Parser.Lex();

  // FIXME: assuming function name will be the line following .thumb_func
  // We really should be checking the next symbol definition even if there's
  // stuff in between.
  // NOTE(review): this reads the next token as an identifier without
  // checking its kind first -- confirm upstream that a non-identifier here
  // is handled gracefully.
  if (needFuncName) {
    Name = Parser.getTok().getIdentifier();
  }

  // Mark symbol as a thumb symbol.
7416 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 7417 getParser().getStreamer().EmitThumbFunc(Func); 7418 return false; 7419 } 7420 7421 /// parseDirectiveSyntax 7422 /// ::= .syntax unified | divided 7423 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 7424 const AsmToken &Tok = Parser.getTok(); 7425 if (Tok.isNot(AsmToken::Identifier)) 7426 return Error(L, "unexpected token in .syntax directive"); 7427 StringRef Mode = Tok.getString(); 7428 if (Mode == "unified" || Mode == "UNIFIED") 7429 Parser.Lex(); 7430 else if (Mode == "divided" || Mode == "DIVIDED") 7431 return Error(L, "'.syntax divided' arm asssembly not supported"); 7432 else 7433 return Error(L, "unrecognized syntax mode in .syntax directive"); 7434 7435 if (getLexer().isNot(AsmToken::EndOfStatement)) 7436 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 7437 Parser.Lex(); 7438 7439 // TODO tell the MC streamer the mode 7440 // getParser().getStreamer().Emit???(); 7441 return false; 7442 } 7443 7444 /// parseDirectiveCode 7445 /// ::= .code 16 | 32 7446 bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 7447 const AsmToken &Tok = Parser.getTok(); 7448 if (Tok.isNot(AsmToken::Integer)) 7449 return Error(L, "unexpected token in .code directive"); 7450 int64_t Val = Parser.getTok().getIntVal(); 7451 if (Val == 16) 7452 Parser.Lex(); 7453 else if (Val == 32) 7454 Parser.Lex(); 7455 else 7456 return Error(L, "invalid operand to .code directive"); 7457 7458 if (getLexer().isNot(AsmToken::EndOfStatement)) 7459 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 7460 Parser.Lex(); 7461 7462 if (Val == 16) { 7463 if (!isThumb()) 7464 SwitchMode(); 7465 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 7466 } else { 7467 if (isThumb()) 7468 SwitchMode(); 7469 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 7470 } 7471 7472 return false; 7473 } 7474 7475 /// parseDirectiveReq 7476 /// ::= name .req registername 7477 bool 
ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  Parser.Lex(); // Eat the '.req' token.
  unsigned Reg;
  SMLoc SRegLoc, ERegLoc;
  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
    Parser.EatToEndOfStatement();
    return Error(SRegLoc, "register name expected");
  }

  // Shouldn't be anything else.
  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
    Parser.EatToEndOfStatement();
    return Error(Parser.getTok().getLoc(),
                 "unexpected input in .req directive.");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Reject a re-definition of the alias to a different register.
  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
    return Error(SRegLoc, "redefinition of '" + Name +
                          "' does not match original.");

  return false;
}

/// parseDirectiveUnreq
///  ::= .unreq registername
bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
  if (Parser.getTok().isNot(AsmToken::Identifier)) {
    Parser.EatToEndOfStatement();
    return Error(L, "unexpected input in .unreq directive.");
  }
  RegisterReqs.erase(Parser.getTok().getIdentifier());
  Parser.Lex(); // Eat the identifier.
  return false;
}

/// parseDirectiveArch
///  ::= .arch token
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  // Not implemented yet.
  return true;
}

/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  // Not implemented yet.
  return true;
}

extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"