//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

// Kind of lane specifier parsed after a NEON vector list: none, "[]" for
// all lanes, or "[n]" for a single indexed lane.
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

/// ARMAsmParser - Target-specific parser for ARM/Thumb assembly. Implements
/// the MCTargetAsmParser interface: tokenized assembly comes in via the
/// generic MCAsmParser, and matched MCInsts are emitted to an MCStreamer.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Parser-side state for the Thumb IT (If-Then) block currently being
  // processed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True when we are currently inside an active IT block (CurPosition holds
  // a real position rather than the ~0U sentinel).
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Convenience wrappers forwarding diagnostics to the generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Register / operand / memory-operand parsing helpers. The int-returning
  // variants return -1 on failure; the bool-returning ones follow the usual
  // MCAsmParser convention of true-on-error.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Target-specific assembler directives (.thumb, .arm, .code, etc.).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the base mnemonic and its
  // predication / carry-set / IMod / IT-mask suffixes.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Flip between ARM and Thumb mode, recomputing the available-feature mask
  // so the matcher sees the right instruction set.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the tablegen'd matcher, one per
  // custom operand class.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match checks and fixups applied to a matched MCInst before
  // emission.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match result codes, returned by
  // checkTargetMatchPredicate in addition to the generic Match_* values.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the payload union below (and the Registers list).
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

  // Per-kind payload; exactly one member is valid, selected by Kind.
  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
330 struct { 331 unsigned RegNum; 332 unsigned Count; 333 unsigned LaneIndex; 334 } VectorList; 335 336 struct { 337 unsigned Val; 338 } VectorIndex; 339 340 struct { 341 const MCExpr *Val; 342 } Imm; 343 344 struct { 345 unsigned Val; // encoded 8-bit representation 346 } FPImm; 347 348 /// Combined record for all forms of ARM address expressions. 349 struct { 350 unsigned BaseRegNum; 351 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 352 // was specified. 353 const MCConstantExpr *OffsetImm; // Offset immediate value 354 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 355 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 356 unsigned ShiftImm; // shift for OffsetReg. 357 unsigned Alignment; // 0 = no alignment specified 358 // n = alignment in bytes (8, 16, or 32) 359 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 360 } Memory; 361 362 struct { 363 unsigned RegNum; 364 bool isAdd; 365 ARM_AM::ShiftOpc ShiftTy; 366 unsigned ShiftImm; 367 } PostIdxReg; 368 369 struct { 370 bool isASR; 371 unsigned Imm; 372 } ShifterImm; 373 struct { 374 ARM_AM::ShiftOpc ShiftTy; 375 unsigned SrcReg; 376 unsigned ShiftReg; 377 unsigned ShiftImm; 378 } RegShiftedReg; 379 struct { 380 ARM_AM::ShiftOpc ShiftTy; 381 unsigned SrcReg; 382 unsigned ShiftImm; 383 } RegShiftedImm; 384 struct { 385 unsigned Imm; 386 } RotImm; 387 struct { 388 unsigned LSB; 389 unsigned Width; 390 } Bitfield; 391 }; 392 393 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 394 public: 395 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 396 Kind = o.Kind; 397 StartLoc = o.StartLoc; 398 EndLoc = o.EndLoc; 399 switch (Kind) { 400 case k_CondCode: 401 CC = o.CC; 402 break; 403 case k_ITCondMask: 404 ITMask = o.ITMask; 405 break; 406 case k_Token: 407 Tok = o.Tok; 408 break; 409 case k_CCOut: 410 case k_Register: 411 Reg = o.Reg; 412 break; 413 case k_RegisterList: 414 case k_DPRRegisterList: 415 case k_SPRRegisterList: 416 Registers = 
o.Registers; 417 break; 418 case k_VectorList: 419 case k_VectorListAllLanes: 420 case k_VectorListIndexed: 421 VectorList = o.VectorList; 422 break; 423 case k_CoprocNum: 424 case k_CoprocReg: 425 Cop = o.Cop; 426 break; 427 case k_CoprocOption: 428 CoprocOption = o.CoprocOption; 429 break; 430 case k_Immediate: 431 Imm = o.Imm; 432 break; 433 case k_FPImmediate: 434 FPImm = o.FPImm; 435 break; 436 case k_MemBarrierOpt: 437 MBOpt = o.MBOpt; 438 break; 439 case k_Memory: 440 Memory = o.Memory; 441 break; 442 case k_PostIndexRegister: 443 PostIdxReg = o.PostIdxReg; 444 break; 445 case k_MSRMask: 446 MMask = o.MMask; 447 break; 448 case k_ProcIFlags: 449 IFlags = o.IFlags; 450 break; 451 case k_ShifterImmediate: 452 ShifterImm = o.ShifterImm; 453 break; 454 case k_ShiftedRegister: 455 RegShiftedReg = o.RegShiftedReg; 456 break; 457 case k_ShiftedImmediate: 458 RegShiftedImm = o.RegShiftedImm; 459 break; 460 case k_RotateImmediate: 461 RotImm = o.RotImm; 462 break; 463 case k_BitfieldDescriptor: 464 Bitfield = o.Bitfield; 465 break; 466 case k_VectorIndex: 467 VectorIndex = o.VectorIndex; 468 break; 469 } 470 } 471 472 /// getStartLoc - Get the location of the first token of this operand. 473 SMLoc getStartLoc() const { return StartLoc; } 474 /// getEndLoc - Get the location of the last token of this operand. 
475 SMLoc getEndLoc() const { return EndLoc; } 476 477 ARMCC::CondCodes getCondCode() const { 478 assert(Kind == k_CondCode && "Invalid access!"); 479 return CC.Val; 480 } 481 482 unsigned getCoproc() const { 483 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 484 return Cop.Val; 485 } 486 487 StringRef getToken() const { 488 assert(Kind == k_Token && "Invalid access!"); 489 return StringRef(Tok.Data, Tok.Length); 490 } 491 492 unsigned getReg() const { 493 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 494 return Reg.RegNum; 495 } 496 497 const SmallVectorImpl<unsigned> &getRegList() const { 498 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 499 Kind == k_SPRRegisterList) && "Invalid access!"); 500 return Registers; 501 } 502 503 const MCExpr *getImm() const { 504 assert(Kind == k_Immediate && "Invalid access!"); 505 return Imm.Val; 506 } 507 508 unsigned getFPImm() const { 509 assert(Kind == k_FPImmediate && "Invalid access!"); 510 return FPImm.Val; 511 } 512 513 unsigned getVectorIndex() const { 514 assert(Kind == k_VectorIndex && "Invalid access!"); 515 return VectorIndex.Val; 516 } 517 518 ARM_MB::MemBOpt getMemBarrierOpt() const { 519 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 520 return MBOpt.Val; 521 } 522 523 ARM_PROC::IFlags getProcIFlags() const { 524 assert(Kind == k_ProcIFlags && "Invalid access!"); 525 return IFlags.Val; 526 } 527 528 unsigned getMSRMask() const { 529 assert(Kind == k_MSRMask && "Invalid access!"); 530 return MMask.Val; 531 } 532 533 bool isCoprocNum() const { return Kind == k_CoprocNum; } 534 bool isCoprocReg() const { return Kind == k_CoprocReg; } 535 bool isCoprocOption() const { return Kind == k_CoprocOption; } 536 bool isCondCode() const { return Kind == k_CondCode; } 537 bool isCCOut() const { return Kind == k_CCOut; } 538 bool isITMask() const { return Kind == k_ITCondMask; } 539 bool isITCondCode() const { return Kind == k_CondCode; } 540 bool isImm() 
const { return Kind == k_Immediate; } 541 bool isFPImm() const { return Kind == k_FPImmediate; } 542 bool isImm8s4() const { 543 if (Kind != k_Immediate) 544 return false; 545 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 546 if (!CE) return false; 547 int64_t Value = CE->getValue(); 548 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 549 } 550 bool isImm0_1020s4() const { 551 if (Kind != k_Immediate) 552 return false; 553 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 554 if (!CE) return false; 555 int64_t Value = CE->getValue(); 556 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 557 } 558 bool isImm0_508s4() const { 559 if (Kind != k_Immediate) 560 return false; 561 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 562 if (!CE) return false; 563 int64_t Value = CE->getValue(); 564 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 565 } 566 bool isImm0_255() const { 567 if (Kind != k_Immediate) 568 return false; 569 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 570 if (!CE) return false; 571 int64_t Value = CE->getValue(); 572 return Value >= 0 && Value < 256; 573 } 574 bool isImm0_1() const { 575 if (Kind != k_Immediate) 576 return false; 577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 578 if (!CE) return false; 579 int64_t Value = CE->getValue(); 580 return Value >= 0 && Value < 2; 581 } 582 bool isImm0_3() const { 583 if (Kind != k_Immediate) 584 return false; 585 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 586 if (!CE) return false; 587 int64_t Value = CE->getValue(); 588 return Value >= 0 && Value < 4; 589 } 590 bool isImm0_7() const { 591 if (Kind != k_Immediate) 592 return false; 593 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 594 if (!CE) return false; 595 int64_t Value = CE->getValue(); 596 return Value >= 0 && Value < 8; 597 } 598 bool isImm0_15() const { 599 if (Kind != k_Immediate) 600 return false; 601 
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 602 if (!CE) return false; 603 int64_t Value = CE->getValue(); 604 return Value >= 0 && Value < 16; 605 } 606 bool isImm0_31() const { 607 if (Kind != k_Immediate) 608 return false; 609 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 610 if (!CE) return false; 611 int64_t Value = CE->getValue(); 612 return Value >= 0 && Value < 32; 613 } 614 bool isImm8() const { 615 if (Kind != k_Immediate) 616 return false; 617 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 618 if (!CE) return false; 619 int64_t Value = CE->getValue(); 620 return Value == 8; 621 } 622 bool isImm16() const { 623 if (Kind != k_Immediate) 624 return false; 625 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 626 if (!CE) return false; 627 int64_t Value = CE->getValue(); 628 return Value == 16; 629 } 630 bool isImm32() const { 631 if (Kind != k_Immediate) 632 return false; 633 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 634 if (!CE) return false; 635 int64_t Value = CE->getValue(); 636 return Value == 32; 637 } 638 bool isImm1_7() const { 639 if (Kind != k_Immediate) 640 return false; 641 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 642 if (!CE) return false; 643 int64_t Value = CE->getValue(); 644 return Value > 0 && Value < 8; 645 } 646 bool isImm1_15() const { 647 if (Kind != k_Immediate) 648 return false; 649 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 650 if (!CE) return false; 651 int64_t Value = CE->getValue(); 652 return Value > 0 && Value < 16; 653 } 654 bool isImm1_31() const { 655 if (Kind != k_Immediate) 656 return false; 657 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 658 if (!CE) return false; 659 int64_t Value = CE->getValue(); 660 return Value > 0 && Value < 32; 661 } 662 bool isImm1_16() const { 663 if (Kind != k_Immediate) 664 return false; 665 const MCConstantExpr *CE = 
dyn_cast<MCConstantExpr>(getImm()); 666 if (!CE) return false; 667 int64_t Value = CE->getValue(); 668 return Value > 0 && Value < 17; 669 } 670 bool isImm1_32() const { 671 if (Kind != k_Immediate) 672 return false; 673 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 674 if (!CE) return false; 675 int64_t Value = CE->getValue(); 676 return Value > 0 && Value < 33; 677 } 678 bool isImm0_32() const { 679 if (Kind != k_Immediate) 680 return false; 681 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 682 if (!CE) return false; 683 int64_t Value = CE->getValue(); 684 return Value >= 0 && Value < 33; 685 } 686 bool isImm0_65535() const { 687 if (Kind != k_Immediate) 688 return false; 689 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 690 if (!CE) return false; 691 int64_t Value = CE->getValue(); 692 return Value >= 0 && Value < 65536; 693 } 694 bool isImm0_65535Expr() const { 695 if (Kind != k_Immediate) 696 return false; 697 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 698 // If it's not a constant expression, it'll generate a fixup and be 699 // handled later. 
700 if (!CE) return true; 701 int64_t Value = CE->getValue(); 702 return Value >= 0 && Value < 65536; 703 } 704 bool isImm24bit() const { 705 if (Kind != k_Immediate) 706 return false; 707 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 708 if (!CE) return false; 709 int64_t Value = CE->getValue(); 710 return Value >= 0 && Value <= 0xffffff; 711 } 712 bool isImmThumbSR() const { 713 if (Kind != k_Immediate) 714 return false; 715 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 716 if (!CE) return false; 717 int64_t Value = CE->getValue(); 718 return Value > 0 && Value < 33; 719 } 720 bool isPKHLSLImm() const { 721 if (Kind != k_Immediate) 722 return false; 723 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 724 if (!CE) return false; 725 int64_t Value = CE->getValue(); 726 return Value >= 0 && Value < 32; 727 } 728 bool isPKHASRImm() const { 729 if (Kind != k_Immediate) 730 return false; 731 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 732 if (!CE) return false; 733 int64_t Value = CE->getValue(); 734 return Value > 0 && Value <= 32; 735 } 736 bool isARMSOImm() const { 737 if (Kind != k_Immediate) 738 return false; 739 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 740 if (!CE) return false; 741 int64_t Value = CE->getValue(); 742 return ARM_AM::getSOImmVal(Value) != -1; 743 } 744 bool isARMSOImmNot() const { 745 if (Kind != k_Immediate) 746 return false; 747 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 748 if (!CE) return false; 749 int64_t Value = CE->getValue(); 750 return ARM_AM::getSOImmVal(~Value) != -1; 751 } 752 bool isT2SOImm() const { 753 if (Kind != k_Immediate) 754 return false; 755 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 756 if (!CE) return false; 757 int64_t Value = CE->getValue(); 758 return ARM_AM::getT2SOImmVal(Value) != -1; 759 } 760 bool isT2SOImmNot() const { 761 if (Kind != k_Immediate) 762 return false; 763 const 
MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 764 if (!CE) return false; 765 int64_t Value = CE->getValue(); 766 return ARM_AM::getT2SOImmVal(~Value) != -1; 767 } 768 bool isSetEndImm() const { 769 if (Kind != k_Immediate) 770 return false; 771 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 772 if (!CE) return false; 773 int64_t Value = CE->getValue(); 774 return Value == 1 || Value == 0; 775 } 776 bool isReg() const { return Kind == k_Register; } 777 bool isRegList() const { return Kind == k_RegisterList; } 778 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 779 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 780 bool isToken() const { return Kind == k_Token; } 781 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 782 bool isMemory() const { return Kind == k_Memory; } 783 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 784 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 785 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 786 bool isRotImm() const { return Kind == k_RotateImmediate; } 787 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 788 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 789 bool isPostIdxReg() const { 790 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 791 } 792 bool isMemNoOffset(bool alignOK = false) const { 793 if (!isMemory()) 794 return false; 795 // No offset of any kind. 796 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 797 (alignOK || Memory.Alignment == 0); 798 } 799 bool isAlignedMemory() const { 800 return isMemNoOffset(true); 801 } 802 bool isAddrMode2() const { 803 if (!isMemory() || Memory.Alignment != 0) return false; 804 // Check for register offset. 805 if (Memory.OffsetRegNum) return true; 806 // Immediate offset in range [-4095, 4095]. 
807 if (!Memory.OffsetImm) return true; 808 int64_t Val = Memory.OffsetImm->getValue(); 809 return Val > -4096 && Val < 4096; 810 } 811 bool isAM2OffsetImm() const { 812 if (Kind != k_Immediate) 813 return false; 814 // Immediate offset in range [-4095, 4095]. 815 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 816 if (!CE) return false; 817 int64_t Val = CE->getValue(); 818 return Val > -4096 && Val < 4096; 819 } 820 bool isAddrMode3() const { 821 if (!isMemory() || Memory.Alignment != 0) return false; 822 // No shifts are legal for AM3. 823 if (Memory.ShiftType != ARM_AM::no_shift) return false; 824 // Check for register offset. 825 if (Memory.OffsetRegNum) return true; 826 // Immediate offset in range [-255, 255]. 827 if (!Memory.OffsetImm) return true; 828 int64_t Val = Memory.OffsetImm->getValue(); 829 return Val > -256 && Val < 256; 830 } 831 bool isAM3Offset() const { 832 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 833 return false; 834 if (Kind == k_PostIndexRegister) 835 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 836 // Immediate offset in range [-255, 255]. 837 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 838 if (!CE) return false; 839 int64_t Val = CE->getValue(); 840 // Special case, #-0 is INT32_MIN. 841 return (Val > -256 && Val < 256) || Val == INT32_MIN; 842 } 843 bool isAddrMode5() const { 844 // If we have an immediate that's not a constant, treat it as a label 845 // reference needing a fixup. If it is a constant, it's something else 846 // and we reject it. 847 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 848 return true; 849 if (!isMemory() || Memory.Alignment != 0) return false; 850 // Check for register offset. 851 if (Memory.OffsetRegNum) return false; 852 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
853 if (!Memory.OffsetImm) return true; 854 int64_t Val = Memory.OffsetImm->getValue(); 855 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 856 Val == INT32_MIN; 857 } 858 bool isMemTBB() const { 859 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 860 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 861 return false; 862 return true; 863 } 864 bool isMemTBH() const { 865 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 866 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 867 Memory.Alignment != 0 ) 868 return false; 869 return true; 870 } 871 bool isMemRegOffset() const { 872 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 873 return false; 874 return true; 875 } 876 bool isT2MemRegOffset() const { 877 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 878 Memory.Alignment != 0) 879 return false; 880 // Only lsl #{0, 1, 2, 3} allowed. 881 if (Memory.ShiftType == ARM_AM::no_shift) 882 return true; 883 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 884 return false; 885 return true; 886 } 887 bool isMemThumbRR() const { 888 // Thumb reg+reg addressing is simple. Just two registers, a base and 889 // an offset. No shifts, negations or any other complicating factors. 890 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 891 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 892 return false; 893 return isARMLowRegister(Memory.BaseRegNum) && 894 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 895 } 896 bool isMemThumbRIs4() const { 897 if (!isMemory() || Memory.OffsetRegNum != 0 || 898 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 899 return false; 900 // Immediate offset, multiple of 4 in range [0, 124]. 
    // (Tail of the preceding isMemThumbRIs* predicate; its signature is
    // above this chunk.) Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  /// Thumb reg+imm memory operand: low base register, no offset register,
  /// no alignment, and an (optional) immediate offset that is an even
  /// value in [0, 62].
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  /// Thumb reg+imm memory operand with an unscaled immediate offset in
  /// [0, 31]; base must be a low register, no offset register or alignment.
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  /// Thumb SP-relative memory operand: base is SP, no offset register or
  /// alignment, immediate offset (if any) a multiple of 4 in [0, 1020].
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255]. INT32_MIN is the sentinel
    // used elsewhere in this file for "#-0", so accept it here too.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1]. Note a missing offset is
    // rejected here (unlike the predicates above): a negative offset must
    // be explicit. INT32_MIN is the "#-0" sentinel.
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095]. INT32_MIN is the "#-0"
    // sentinel.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  /// Post-indexed constant offset in (-256, 256), or INT32_MIN for "#-0".
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  /// Post-indexed constant offset, multiple of 4 in [-1020, 1020], or
  /// INT32_MIN for "#-0".
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
           (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.

  /// Vector list of exactly one D register.
  bool isVecListOneD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 1;
  }

  /// Vector list of exactly two D registers.
  bool isVecListTwoD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 2;
  }

  /// Vector list of exactly three D registers.
  bool isVecListThreeD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 3;
  }

  /// Vector list of exactly four D registers.
  bool isVecListFourD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 4;
  }

  bool isVecListTwoQ() const {
    if (Kind != k_VectorList) return false;
    //FIXME: We haven't taught the parser to handle by-two register lists
    // yet, so don't pretend to know one.
    // (Tail of isVecListTwoQ; per the FIXME above, deliberately always
    // false until by-two register lists are supported by the parser.)
    return VectorList.Count == 2 && false;
  }

  bool isVecListOneDAllLanes() const {
    if (Kind != k_VectorListAllLanes) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (Kind != k_VectorListAllLanes) return false;
    return VectorList.Count == 2;
  }

  /// One-register indexed vector list with a byte-sized lane index (0-7).
  bool isVecListOneDByteIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  /// Lane index valid for a byte-element vector (8 lanes per D register).
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  /// Lane index valid for a halfword-element vector (4 lanes).
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  /// Lane index valid for a word-element vector (2 lanes).
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }

  bool isNEONi8splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    // (Tail of addExpr, whose declaration and "add as immediates when
    // possible; null MCExpr = 0" comment are immediately above.)
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  /// Condition code is two MCInst operands: the ARMCC value and the CPSR
  /// register (or reg0 for AL, which reads no flags).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0 : ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  /// Register-shifted register: source reg, shift reg, and the shift
  /// type+amount packed by getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  /// Register shifted by an immediate: source reg plus the packed
  /// shift type+amount.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  /// Shifter immediate: bit 5 selects ASR vs LSL, low bits hold the amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  /// A register list becomes one register operand per member.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    // (Tail of addT2SOImmNotOperands: emit the bitwise complement of the
    // source-level immediate, per the comment immediately above.)
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  /// AddrMode2: base reg, offset reg (0 if none), and a packed AM2 opcode
  /// carrying add/sub, offset or shift amount, and shift type.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  /// AM2 immediate-only offset: reg0 plus the packed AM2 opcode.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  /// AddrMode3: base reg, offset reg (0 if none), and a packed AM3 opcode.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  /// AM3 post-index offset: either a register (k_PostIndexRegister) or a
  /// constant immediate, as reg + packed AM3 opcode.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  /// AddrMode5: base reg plus a packed AM5 opcode with the offset scaled
  /// down by 4.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    // (Tail of addMemImm12OffsetOperands: a bare immediate is a label
    // reference, otherwise a normal base-reg + immediate-offset pair.)
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  /// Reg+reg memory with an optional shift: base, offset reg, and the
  /// packed AM2 opcode (add/sub, shift amount, shift type).
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // The addMemThumbRIs* forms scale the immediate down by the access size
  // (4, 2, 1) to produce the encoded offset field.
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  /// Post-index immediate: magnitude in the low 8 bits, add/sub flag in
  /// bit 8. INT32_MIN encodes "#-0" (subtract, zero magnitude).
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    // (Tail of addNEONi8splatOperands: OR in the i8-splat tag bits.)
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Values >= 256 keep only the high byte and are tagged 0xa00; smaller
    // values are tagged 0x800.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // The single nonzero byte (validated by isNEONi32splat) is shifted
    // down and tagged 0x200/0x400/0x600 according to its position.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Like addNEONi32splatOperands, but the VMOV-only 00Xf/0Xff forms
    // (accepted by isNEONi32vmov) get the 0xc00/0xd00 tags instead.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // isNEONi64splat guarantees each byte is 0 or 0xff, so bit 0 of each
    // byte is exactly that byte's all-ones predicate; collect those eight
    // bits into Imm.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory methods. Each allocates a new ARMOperand of the matching kind,
  // fills in the payload, and records the source range. Callers own the
  // returned pointer.

  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a raw token operand. Note the operand does NOT own the string
  /// data; it keeps a pointer/length into the parser's source buffer.
  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a plain register operand.
  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register-shifted-register operand (e.g. "r0, lsl r1").
  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register-shifted-immediate operand (e.g. "r0, lsl #4").
  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a bare shifter-immediate operand ("lsl/asr #imm").
  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a rotate-immediate operand ("ror #imm").
  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a bitfield descriptor operand (lsb + width, for BFC/BFI).
  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register-list operand. The kind (GPR/DPR/SPR list) is derived
  /// from the class of the first register; the list is sorted so the bitmask
  /// encoding order does not depend on source order.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    // Sort by enum value; the source locations are deliberately dropped.
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  /// Create a NEON vector-list operand ({d0, d1, ...}).
  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector list with the all-lanes marker ({d0[], ...}).
  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
                                              SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector list with a single indexed lane ({d0[1], ...}).
  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
                                             unsigned Index, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector index operand ("[n]").
  /// NOTE(review): the Ctx parameter is currently unused.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a generic immediate (expression) operand.
  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a VFP floating-point immediate operand (encoded 8-bit form).
  /// NOTE(review): the Ctx parameter is currently unused.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a memory operand. Covers all addressing modes: base register
  /// plus either an immediate offset or a (possibly shifted) register
  /// offset, with optional alignment and negation.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a post-indexed register operand (the "rN" of "[r0], rN").
  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a memory barrier option operand (for DSB/DMB).
  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a processor interrupt-flags operand (for CPS).
  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create an MSR mask operand (the "<spec_reg>" of MSR/MRS).
  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

/// Debug dump of an operand. The exact formats here are for diagnostics
/// only; nothing round-trips through them.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table is indexed by the 4-bit mask value.
    // NOTE(review): only 15 entries — a mask of 0b1111 would index out of
    // bounds; presumably that encoding cannot reach here. Confirm.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): this case prints a closing '>' but no opening '<'.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print the flags msb-first.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // The stored rotation is in units of 8 bits.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// Target hook: parse a register and report success via the return value
/// (false == success, matching the MCTargetAsmParser convention).
/// NOTE(review): StartLoc/EndLoc are never written here — callers relying on
/// them would see stale values. Confirm against the base-class contract.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept the standard aliases the tablegen matcher doesn't know about.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  // NOTE(review): Error() returns true, so this path returns 1
  // ("recoverable") even though the operator token was eaten and the
  // previous operand popped/destroyed — confirm callers treat this as fatal.
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      // NOTE(review): L is taken after tryParseRegister may have consumed
      // the token, so on failure the reported location is the next token.
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Build the combined operand. For rrx ShiftReg == SrcReg but the encoding
  // is the immediate form, hence the explicit rrx exclusion below.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name. The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2227 bool ARMAsmParser:: 2228 tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2229 SMLoc S = Parser.getTok().getLoc(); 2230 int RegNo = tryParseRegister(); 2231 if (RegNo == -1) 2232 return true; 2233 2234 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2235 2236 const AsmToken &ExclaimTok = Parser.getTok(); 2237 if (ExclaimTok.is(AsmToken::Exclaim)) { 2238 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2239 ExclaimTok.getLoc())); 2240 Parser.Lex(); // Eat exclaim token 2241 return false; 2242 } 2243 2244 // Also check for an index operand. This is only legal for vector registers, 2245 // but that'll get caught OK in operand matching, so we don't need to 2246 // explicitly filter everything else out here. 2247 if (Parser.getTok().is(AsmToken::LBrac)) { 2248 SMLoc SIdx = Parser.getTok().getLoc(); 2249 Parser.Lex(); // Eat left bracket token. 2250 2251 const MCExpr *ImmVal; 2252 if (getParser().ParseExpression(ImmVal)) 2253 return MatchOperand_ParseFail; 2254 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2255 if (!MCE) { 2256 TokError("immediate value expected for vector index"); 2257 return MatchOperand_ParseFail; 2258 } 2259 2260 SMLoc E = Parser.getTok().getLoc(); 2261 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2262 Error(E, "']' expected"); 2263 return MatchOperand_ParseFail; 2264 } 2265 2266 Parser.Lex(); // Eat right bracket token. 2267 2268 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2269 SIdx, E, 2270 getContext())); 2271 } 2272 2273 return false; 2274 } 2275 2276 /// MatchCoprocessorOperandName - Try to parse an coprocessor related 2277 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2278 /// "c5", ... 2279 static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2280 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2281 // but efficient. 
2282 switch (Name.size()) { 2283 default: break; 2284 case 2: 2285 if (Name[0] != CoprocOp) 2286 return -1; 2287 switch (Name[1]) { 2288 default: return -1; 2289 case '0': return 0; 2290 case '1': return 1; 2291 case '2': return 2; 2292 case '3': return 3; 2293 case '4': return 4; 2294 case '5': return 5; 2295 case '6': return 6; 2296 case '7': return 7; 2297 case '8': return 8; 2298 case '9': return 9; 2299 } 2300 break; 2301 case 3: 2302 if (Name[0] != CoprocOp || Name[1] != '1') 2303 return -1; 2304 switch (Name[2]) { 2305 default: return -1; 2306 case '0': return 10; 2307 case '1': return 11; 2308 case '2': return 12; 2309 case '3': return 13; 2310 case '4': return 14; 2311 case '5': return 15; 2312 } 2313 break; 2314 } 2315 2316 return -1; 2317 } 2318 2319 /// parseITCondCode - Try to parse a condition code for an IT instruction. 2320 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2321 parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2322 SMLoc S = Parser.getTok().getLoc(); 2323 const AsmToken &Tok = Parser.getTok(); 2324 if (!Tok.is(AsmToken::Identifier)) 2325 return MatchOperand_NoMatch; 2326 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2327 .Case("eq", ARMCC::EQ) 2328 .Case("ne", ARMCC::NE) 2329 .Case("hs", ARMCC::HS) 2330 .Case("cs", ARMCC::HS) 2331 .Case("lo", ARMCC::LO) 2332 .Case("cc", ARMCC::LO) 2333 .Case("mi", ARMCC::MI) 2334 .Case("pl", ARMCC::PL) 2335 .Case("vs", ARMCC::VS) 2336 .Case("vc", ARMCC::VC) 2337 .Case("hi", ARMCC::HI) 2338 .Case("ls", ARMCC::LS) 2339 .Case("ge", ARMCC::GE) 2340 .Case("lt", ARMCC::LT) 2341 .Case("gt", ARMCC::GT) 2342 .Case("le", ARMCC::LE) 2343 .Case("al", ARMCC::AL) 2344 .Default(~0U); 2345 if (CC == ~0U) 2346 return MatchOperand_NoMatch; 2347 Parser.Lex(); // Eat the token. 2348 2349 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2350 2351 return MatchOperand_Success; 2352 } 2353 2354 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2355 /// token must be an Identifier when called, and if it is a coprocessor 2356 /// number, the token is eaten and the operand is added to the operand list. 2357 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2358 parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2359 SMLoc S = Parser.getTok().getLoc(); 2360 const AsmToken &Tok = Parser.getTok(); 2361 if (Tok.isNot(AsmToken::Identifier)) 2362 return MatchOperand_NoMatch; 2363 2364 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2365 if (Num == -1) 2366 return MatchOperand_NoMatch; 2367 2368 Parser.Lex(); // Eat identifier token. 2369 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2370 return MatchOperand_Success; 2371 } 2372 2373 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2374 /// token must be an Identifier when called, and if it is a coprocessor 2375 /// number, the token is eaten and the operand is added to the operand list. 2376 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2377 parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2378 SMLoc S = Parser.getTok().getLoc(); 2379 const AsmToken &Tok = Parser.getTok(); 2380 if (Tok.isNot(AsmToken::Identifier)) 2381 return MatchOperand_NoMatch; 2382 2383 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2384 if (Reg == -1) 2385 return MatchOperand_NoMatch; 2386 2387 Parser.Lex(); // Eat identifier token. 2388 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2389 return MatchOperand_Success; 2390 } 2391 2392 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2393 /// coproc_option : '{' imm0_255 '}' 2394 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2395 parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2396 SMLoc S = Parser.getTok().getLoc(); 2397 2398 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2399 if (Parser.getTok().isNot(AsmToken::LCurly)) 2400 return MatchOperand_NoMatch; 2401 Parser.Lex(); // Eat the '{' 2402 2403 const MCExpr *Expr; 2404 SMLoc Loc = Parser.getTok().getLoc(); 2405 if (getParser().ParseExpression(Expr)) { 2406 Error(Loc, "illegal expression"); 2407 return MatchOperand_ParseFail; 2408 } 2409 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2410 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2411 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2412 return MatchOperand_ParseFail; 2413 } 2414 int Val = CE->getValue(); 2415 2416 // Check for and consume the closing '}' 2417 if (Parser.getTok().isNot(AsmToken::RCurly)) 2418 return MatchOperand_ParseFail; 2419 SMLoc E = Parser.getTok().getLoc(); 2420 Parser.Lex(); // Eat the '}' 2421 2422 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2423 return MatchOperand_Success; 2424 } 2425 2426 // For register list parsing, we need to map from raw GPR register numbering 2427 // to the enumeration values. The enumeration values aren't sorted by 2428 // register number due to our using "sp", "lr" and "pc" as canonical names. 2429 static unsigned getNextRegister(unsigned Reg) { 2430 // If this is a GPR, we need to do it manually, otherwise we can rely 2431 // on the sort ordering of the enumeration since the other reg-classes 2432 // are sane. 
2433 if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2434 return Reg + 1; 2435 switch(Reg) { 2436 default: assert(0 && "Invalid GPR number!"); 2437 case ARM::R0: return ARM::R1; case ARM::R1: return ARM::R2; 2438 case ARM::R2: return ARM::R3; case ARM::R3: return ARM::R4; 2439 case ARM::R4: return ARM::R5; case ARM::R5: return ARM::R6; 2440 case ARM::R6: return ARM::R7; case ARM::R7: return ARM::R8; 2441 case ARM::R8: return ARM::R9; case ARM::R9: return ARM::R10; 2442 case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12; 2443 case ARM::R12: return ARM::SP; case ARM::SP: return ARM::LR; 2444 case ARM::LR: return ARM::PC; case ARM::PC: return ARM::R0; 2445 } 2446 } 2447 2448 // Return the low-subreg of a given Q register. 2449 static unsigned getDRegFromQReg(unsigned QReg) { 2450 switch (QReg) { 2451 default: llvm_unreachable("expected a Q register!"); 2452 case ARM::Q0: return ARM::D0; 2453 case ARM::Q1: return ARM::D2; 2454 case ARM::Q2: return ARM::D4; 2455 case ARM::Q3: return ARM::D6; 2456 case ARM::Q4: return ARM::D8; 2457 case ARM::Q5: return ARM::D10; 2458 case ARM::Q6: return ARM::D12; 2459 case ARM::Q7: return ARM::D14; 2460 case ARM::Q8: return ARM::D16; 2461 case ARM::Q9: return ARM::D18; 2462 case ARM::Q10: return ARM::D20; 2463 case ARM::Q11: return ARM::D22; 2464 case ARM::Q12: return ARM::D24; 2465 case ARM::Q13: return ARM::D26; 2466 case ARM::Q14: return ARM::D28; 2467 case ARM::Q15: return ARM::D30; 2468 } 2469 } 2470 2471 /// Parse a register list. 2472 bool ARMAsmParser:: 2473 parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2474 assert(Parser.getTok().is(AsmToken::LCurly) && 2475 "Token is not a Left Curly Brace"); 2476 SMLoc S = Parser.getTok().getLoc(); 2477 Parser.Lex(); // Eat '{' token. 2478 SMLoc RegLoc = Parser.getTok().getLoc(); 2479 2480 // Check the first register in the list to see what register class 2481 // this is a list of. 
2482 int Reg = tryParseRegister(); 2483 if (Reg == -1) 2484 return Error(RegLoc, "register expected"); 2485 2486 // The reglist instructions have at most 16 registers, so reserve 2487 // space for that many. 2488 SmallVector<std::pair<unsigned, SMLoc>, 16> Registers; 2489 2490 // Allow Q regs and just interpret them as the two D sub-registers. 2491 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2492 Reg = getDRegFromQReg(Reg); 2493 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2494 ++Reg; 2495 } 2496 const MCRegisterClass *RC; 2497 if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg)) 2498 RC = &ARMMCRegisterClasses[ARM::GPRRegClassID]; 2499 else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) 2500 RC = &ARMMCRegisterClasses[ARM::DPRRegClassID]; 2501 else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg)) 2502 RC = &ARMMCRegisterClasses[ARM::SPRRegClassID]; 2503 else 2504 return Error(RegLoc, "invalid register in register list"); 2505 2506 // Store the register. 2507 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2508 2509 // This starts immediately after the first register token in the list, 2510 // so we can see either a comma or a minus (range separator) as a legal 2511 // next token. 2512 while (Parser.getTok().is(AsmToken::Comma) || 2513 Parser.getTok().is(AsmToken::Minus)) { 2514 if (Parser.getTok().is(AsmToken::Minus)) { 2515 Parser.Lex(); // Eat the minus. 2516 SMLoc EndLoc = Parser.getTok().getLoc(); 2517 int EndReg = tryParseRegister(); 2518 if (EndReg == -1) 2519 return Error(EndLoc, "register expected"); 2520 // Allow Q regs and just interpret them as the two D sub-registers. 2521 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2522 EndReg = getDRegFromQReg(EndReg) + 1; 2523 // If the register is the same as the start reg, there's nothing 2524 // more to do. 2525 if (Reg == EndReg) 2526 continue; 2527 // The register must be in the same register class as the first. 
2528 if (!RC->contains(EndReg)) 2529 return Error(EndLoc, "invalid register in register list"); 2530 // Ranges must go from low to high. 2531 if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg)) 2532 return Error(EndLoc, "bad range in register list"); 2533 2534 // Add all the registers in the range to the register list. 2535 while (Reg != EndReg) { 2536 Reg = getNextRegister(Reg); 2537 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2538 } 2539 continue; 2540 } 2541 Parser.Lex(); // Eat the comma. 2542 RegLoc = Parser.getTok().getLoc(); 2543 int OldReg = Reg; 2544 Reg = tryParseRegister(); 2545 if (Reg == -1) 2546 return Error(RegLoc, "register expected"); 2547 // Allow Q regs and just interpret them as the two D sub-registers. 2548 bool isQReg = false; 2549 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2550 Reg = getDRegFromQReg(Reg); 2551 isQReg = true; 2552 } 2553 // The register must be in the same register class as the first. 2554 if (!RC->contains(Reg)) 2555 return Error(RegLoc, "invalid register in register list"); 2556 // List must be monotonically increasing. 2557 if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg)) 2558 return Error(RegLoc, "register list not in ascending order"); 2559 // VFP register lists must also be contiguous. 2560 // It's OK to use the enumeration values directly here rather, as the 2561 // VFP register classes have the enum sorted properly. 2562 if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] && 2563 Reg != OldReg + 1) 2564 return Error(RegLoc, "non-contiguous register range"); 2565 Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc)); 2566 if (isQReg) 2567 Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc)); 2568 } 2569 2570 SMLoc E = Parser.getTok().getLoc(); 2571 if (Parser.getTok().isNot(AsmToken::RCurly)) 2572 return Error(E, "'}' expected"); 2573 Parser.Lex(); // Eat '}' token. 
2574 2575 Operands.push_back(ARMOperand::CreateRegList(Registers, S, E)); 2576 return false; 2577 } 2578 2579 // Helper function to parse the lane index for vector lists. 2580 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2581 parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) { 2582 Index = 0; // Always return a defined index value. 2583 if (Parser.getTok().is(AsmToken::LBrac)) { 2584 Parser.Lex(); // Eat the '['. 2585 if (Parser.getTok().is(AsmToken::RBrac)) { 2586 // "Dn[]" is the 'all lanes' syntax. 2587 LaneKind = AllLanes; 2588 Parser.Lex(); // Eat the ']'. 2589 return MatchOperand_Success; 2590 } 2591 if (Parser.getTok().is(AsmToken::Integer)) { 2592 int64_t Val = Parser.getTok().getIntVal(); 2593 // Make this range check context sensitive for .8, .16, .32. 2594 if (Val < 0 && Val > 7) 2595 Error(Parser.getTok().getLoc(), "lane index out of range"); 2596 Index = Val; 2597 LaneKind = IndexedLane; 2598 Parser.Lex(); // Eat the token; 2599 if (Parser.getTok().isNot(AsmToken::RBrac)) 2600 Error(Parser.getTok().getLoc(), "']' expected"); 2601 Parser.Lex(); // Eat the ']'. 2602 return MatchOperand_Success; 2603 } 2604 Error(Parser.getTok().getLoc(), "lane index must be empty or an integer"); 2605 return MatchOperand_ParseFail; 2606 } 2607 LaneKind = NoLanes; 2608 return MatchOperand_Success; 2609 } 2610 2611 // parse a vector register list 2612 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2613 parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2614 VectorLaneTy LaneKind; 2615 unsigned LaneIndex; 2616 SMLoc S = Parser.getTok().getLoc(); 2617 // As an extension (to match gas), support a plain D register or Q register 2618 // (without encosing curly braces) as a single or double entry list, 2619 // respectively. 
2620 if (Parser.getTok().is(AsmToken::Identifier)) { 2621 int Reg = tryParseRegister(); 2622 if (Reg == -1) 2623 return MatchOperand_NoMatch; 2624 SMLoc E = Parser.getTok().getLoc(); 2625 if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) { 2626 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2627 if (Res != MatchOperand_Success) 2628 return Res; 2629 switch (LaneKind) { 2630 default: 2631 assert(0 && "unexpected lane kind!"); 2632 case NoLanes: 2633 E = Parser.getTok().getLoc(); 2634 Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E)); 2635 break; 2636 case AllLanes: 2637 E = Parser.getTok().getLoc(); 2638 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E)); 2639 break; 2640 case IndexedLane: 2641 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1, 2642 LaneIndex, S,E)); 2643 break; 2644 } 2645 return MatchOperand_Success; 2646 } 2647 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2648 Reg = getDRegFromQReg(Reg); 2649 OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex); 2650 if (Res != MatchOperand_Success) 2651 return Res; 2652 switch (LaneKind) { 2653 default: 2654 assert(0 && "unexpected lane kind!"); 2655 case NoLanes: 2656 E = Parser.getTok().getLoc(); 2657 Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E)); 2658 break; 2659 case AllLanes: 2660 E = Parser.getTok().getLoc(); 2661 Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E)); 2662 break; 2663 case IndexedLane: 2664 Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2, 2665 LaneIndex, S,E)); 2666 break; 2667 } 2668 return MatchOperand_Success; 2669 } 2670 Error(S, "vector register expected"); 2671 return MatchOperand_ParseFail; 2672 } 2673 2674 if (Parser.getTok().isNot(AsmToken::LCurly)) 2675 return MatchOperand_NoMatch; 2676 2677 Parser.Lex(); // Eat '{' token. 
2678 SMLoc RegLoc = Parser.getTok().getLoc(); 2679 2680 int Reg = tryParseRegister(); 2681 if (Reg == -1) { 2682 Error(RegLoc, "register expected"); 2683 return MatchOperand_ParseFail; 2684 } 2685 unsigned Count = 1; 2686 unsigned FirstReg = Reg; 2687 // The list is of D registers, but we also allow Q regs and just interpret 2688 // them as the two D sub-registers. 2689 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2690 FirstReg = Reg = getDRegFromQReg(Reg); 2691 ++Reg; 2692 ++Count; 2693 } 2694 if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success) 2695 return MatchOperand_ParseFail; 2696 2697 while (Parser.getTok().is(AsmToken::Comma) || 2698 Parser.getTok().is(AsmToken::Minus)) { 2699 if (Parser.getTok().is(AsmToken::Minus)) { 2700 Parser.Lex(); // Eat the minus. 2701 SMLoc EndLoc = Parser.getTok().getLoc(); 2702 int EndReg = tryParseRegister(); 2703 if (EndReg == -1) { 2704 Error(EndLoc, "register expected"); 2705 return MatchOperand_ParseFail; 2706 } 2707 // Allow Q regs and just interpret them as the two D sub-registers. 2708 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg)) 2709 EndReg = getDRegFromQReg(EndReg) + 1; 2710 // If the register is the same as the start reg, there's nothing 2711 // more to do. 2712 if (Reg == EndReg) 2713 continue; 2714 // The register must be in the same register class as the first. 2715 if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) { 2716 Error(EndLoc, "invalid register in register list"); 2717 return MatchOperand_ParseFail; 2718 } 2719 // Ranges must go from low to high. 2720 if (Reg > EndReg) { 2721 Error(EndLoc, "bad range in register list"); 2722 return MatchOperand_ParseFail; 2723 } 2724 // Parse the lane specifier if present. 
2725 VectorLaneTy NextLaneKind; 2726 unsigned NextLaneIndex; 2727 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2728 return MatchOperand_ParseFail; 2729 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2730 Error(EndLoc, "mismatched lane index in register list"); 2731 return MatchOperand_ParseFail; 2732 } 2733 EndLoc = Parser.getTok().getLoc(); 2734 2735 // Add all the registers in the range to the register list. 2736 Count += EndReg - Reg; 2737 Reg = EndReg; 2738 continue; 2739 } 2740 Parser.Lex(); // Eat the comma. 2741 RegLoc = Parser.getTok().getLoc(); 2742 int OldReg = Reg; 2743 Reg = tryParseRegister(); 2744 if (Reg == -1) { 2745 Error(RegLoc, "register expected"); 2746 return MatchOperand_ParseFail; 2747 } 2748 // vector register lists must be contiguous. 2749 // It's OK to use the enumeration values directly here rather, as the 2750 // VFP register classes have the enum sorted properly. 2751 // 2752 // The list is of D registers, but we also allow Q regs and just interpret 2753 // them as the two D sub-registers. 2754 if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) { 2755 Reg = getDRegFromQReg(Reg); 2756 if (Reg != OldReg + 1) { 2757 Error(RegLoc, "non-contiguous register range"); 2758 return MatchOperand_ParseFail; 2759 } 2760 ++Reg; 2761 Count += 2; 2762 // Parse the lane specifier if present. 2763 VectorLaneTy NextLaneKind; 2764 unsigned NextLaneIndex; 2765 SMLoc EndLoc = Parser.getTok().getLoc(); 2766 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2767 return MatchOperand_ParseFail; 2768 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2769 Error(EndLoc, "mismatched lane index in register list"); 2770 return MatchOperand_ParseFail; 2771 } 2772 continue; 2773 } 2774 // Normal D register. Just check that it's contiguous and keep going. 
2775 if (Reg != OldReg + 1) { 2776 Error(RegLoc, "non-contiguous register range"); 2777 return MatchOperand_ParseFail; 2778 } 2779 ++Count; 2780 // Parse the lane specifier if present. 2781 VectorLaneTy NextLaneKind; 2782 unsigned NextLaneIndex; 2783 SMLoc EndLoc = Parser.getTok().getLoc(); 2784 if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success) 2785 return MatchOperand_ParseFail; 2786 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 2787 Error(EndLoc, "mismatched lane index in register list"); 2788 return MatchOperand_ParseFail; 2789 } 2790 } 2791 2792 SMLoc E = Parser.getTok().getLoc(); 2793 if (Parser.getTok().isNot(AsmToken::RCurly)) { 2794 Error(E, "'}' expected"); 2795 return MatchOperand_ParseFail; 2796 } 2797 Parser.Lex(); // Eat '}' token. 2798 2799 switch (LaneKind) { 2800 default: 2801 assert(0 && "unexpected lane kind in register list."); 2802 case NoLanes: 2803 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E)); 2804 break; 2805 case AllLanes: 2806 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, 2807 S, E)); 2808 break; 2809 case IndexedLane: 2810 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, 2811 LaneIndex, S, E)); 2812 break; 2813 } 2814 return MatchOperand_Success; 2815 } 2816 2817 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 
2818 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2819 parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2820 SMLoc S = Parser.getTok().getLoc(); 2821 const AsmToken &Tok = Parser.getTok(); 2822 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2823 StringRef OptStr = Tok.getString(); 2824 2825 unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size())) 2826 .Case("sy", ARM_MB::SY) 2827 .Case("st", ARM_MB::ST) 2828 .Case("sh", ARM_MB::ISH) 2829 .Case("ish", ARM_MB::ISH) 2830 .Case("shst", ARM_MB::ISHST) 2831 .Case("ishst", ARM_MB::ISHST) 2832 .Case("nsh", ARM_MB::NSH) 2833 .Case("un", ARM_MB::NSH) 2834 .Case("nshst", ARM_MB::NSHST) 2835 .Case("unst", ARM_MB::NSHST) 2836 .Case("osh", ARM_MB::OSH) 2837 .Case("oshst", ARM_MB::OSHST) 2838 .Default(~0U); 2839 2840 if (Opt == ~0U) 2841 return MatchOperand_NoMatch; 2842 2843 Parser.Lex(); // Eat identifier token. 2844 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 2845 return MatchOperand_Success; 2846 } 2847 2848 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 2849 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2850 parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2851 SMLoc S = Parser.getTok().getLoc(); 2852 const AsmToken &Tok = Parser.getTok(); 2853 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2854 StringRef IFlagsStr = Tok.getString(); 2855 2856 // An iflags string of "none" is interpreted to mean that none of the AIF 2857 // bits are set. Not a terribly useful instruction, but a valid encoding. 
2858 unsigned IFlags = 0; 2859 if (IFlagsStr != "none") { 2860 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2861 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2862 .Case("a", ARM_PROC::A) 2863 .Case("i", ARM_PROC::I) 2864 .Case("f", ARM_PROC::F) 2865 .Default(~0U); 2866 2867 // If some specific iflag is already set, it means that some letter is 2868 // present more than once, this is not acceptable. 2869 if (Flag == ~0U || (IFlags & Flag)) 2870 return MatchOperand_NoMatch; 2871 2872 IFlags |= Flag; 2873 } 2874 } 2875 2876 Parser.Lex(); // Eat identifier token. 2877 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2878 return MatchOperand_Success; 2879 } 2880 2881 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 2882 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2883 parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2884 SMLoc S = Parser.getTok().getLoc(); 2885 const AsmToken &Tok = Parser.getTok(); 2886 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2887 StringRef Mask = Tok.getString(); 2888 2889 if (isMClass()) { 2890 // See ARMv6-M 10.1.1 2891 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2892 .Case("apsr", 0) 2893 .Case("iapsr", 1) 2894 .Case("eapsr", 2) 2895 .Case("xpsr", 3) 2896 .Case("ipsr", 5) 2897 .Case("epsr", 6) 2898 .Case("iepsr", 7) 2899 .Case("msp", 8) 2900 .Case("psp", 9) 2901 .Case("primask", 16) 2902 .Case("basepri", 17) 2903 .Case("basepri_max", 18) 2904 .Case("faultmask", 19) 2905 .Case("control", 20) 2906 .Default(~0U); 2907 2908 if (FlagsVal == ~0U) 2909 return MatchOperand_NoMatch; 2910 2911 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2912 // basepri, basepri_max and faultmask only valid for V7m. 2913 return MatchOperand_NoMatch; 2914 2915 Parser.Lex(); // Eat identifier token. 
2916 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2917 return MatchOperand_Success; 2918 } 2919 2920 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2921 size_t Start = 0, Next = Mask.find('_'); 2922 StringRef Flags = ""; 2923 std::string SpecReg = Mask.slice(Start, Next).lower(); 2924 if (Next != StringRef::npos) 2925 Flags = Mask.slice(Next+1, Mask.size()); 2926 2927 // FlagsVal contains the complete mask: 2928 // 3-0: Mask 2929 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2930 unsigned FlagsVal = 0; 2931 2932 if (SpecReg == "apsr") { 2933 FlagsVal = StringSwitch<unsigned>(Flags) 2934 .Case("nzcvq", 0x8) // same as CPSR_f 2935 .Case("g", 0x4) // same as CPSR_s 2936 .Case("nzcvqg", 0xc) // same as CPSR_fs 2937 .Default(~0U); 2938 2939 if (FlagsVal == ~0U) { 2940 if (!Flags.empty()) 2941 return MatchOperand_NoMatch; 2942 else 2943 FlagsVal = 8; // No flag 2944 } 2945 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2946 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2947 Flags = "fc"; 2948 for (int i = 0, e = Flags.size(); i != e; ++i) { 2949 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2950 .Case("c", 1) 2951 .Case("x", 2) 2952 .Case("s", 4) 2953 .Case("f", 8) 2954 .Default(~0U); 2955 2956 // If some specific flag is already set, it means that some letter is 2957 // present more than once, this is not acceptable. 2958 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2959 return MatchOperand_NoMatch; 2960 FlagsVal |= Flag; 2961 } 2962 } else // No match for special register. 2963 return MatchOperand_NoMatch; 2964 2965 // Special register without flags is NOT equivalent to "fc" flags. 2966 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2967 // two lines would enable gas compatibility at the expense of breaking 2968 // round-tripping. 
2969 // 2970 // if (!FlagsVal) 2971 // FlagsVal = 0x9; 2972 2973 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2974 if (SpecReg == "spsr") 2975 FlagsVal |= 16; 2976 2977 Parser.Lex(); // Eat identifier token. 2978 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2979 return MatchOperand_Success; 2980 } 2981 2982 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2983 parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2984 int Low, int High) { 2985 const AsmToken &Tok = Parser.getTok(); 2986 if (Tok.isNot(AsmToken::Identifier)) { 2987 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2988 return MatchOperand_ParseFail; 2989 } 2990 StringRef ShiftName = Tok.getString(); 2991 std::string LowerOp = Op.lower(); 2992 std::string UpperOp = Op.upper(); 2993 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2994 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2995 return MatchOperand_ParseFail; 2996 } 2997 Parser.Lex(); // Eat shift type token. 2998 2999 // There must be a '#' and a shift amount. 3000 if (Parser.getTok().isNot(AsmToken::Hash)) { 3001 Error(Parser.getTok().getLoc(), "'#' expected"); 3002 return MatchOperand_ParseFail; 3003 } 3004 Parser.Lex(); // Eat hash token. 
3005 3006 const MCExpr *ShiftAmount; 3007 SMLoc Loc = Parser.getTok().getLoc(); 3008 if (getParser().ParseExpression(ShiftAmount)) { 3009 Error(Loc, "illegal expression"); 3010 return MatchOperand_ParseFail; 3011 } 3012 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3013 if (!CE) { 3014 Error(Loc, "constant expression expected"); 3015 return MatchOperand_ParseFail; 3016 } 3017 int Val = CE->getValue(); 3018 if (Val < Low || Val > High) { 3019 Error(Loc, "immediate value out of range"); 3020 return MatchOperand_ParseFail; 3021 } 3022 3023 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 3024 3025 return MatchOperand_Success; 3026 } 3027 3028 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3029 parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3030 const AsmToken &Tok = Parser.getTok(); 3031 SMLoc S = Tok.getLoc(); 3032 if (Tok.isNot(AsmToken::Identifier)) { 3033 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3034 return MatchOperand_ParseFail; 3035 } 3036 int Val = StringSwitch<int>(Tok.getString()) 3037 .Case("be", 1) 3038 .Case("le", 0) 3039 .Default(-1); 3040 Parser.Lex(); // Eat the token. 3041 3042 if (Val == -1) { 3043 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 3044 return MatchOperand_ParseFail; 3045 } 3046 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 3047 getContext()), 3048 S, Parser.getTok().getLoc())); 3049 return MatchOperand_Success; 3050 } 3051 3052 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 3053 /// instructions. Legal values are: 3054 /// lsl #n 'n' in [0,31] 3055 /// asr #n 'n' in [1,32] 3056 /// n == 32 encoded as n == 0. 
3057 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3058 parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3059 const AsmToken &Tok = Parser.getTok(); 3060 SMLoc S = Tok.getLoc(); 3061 if (Tok.isNot(AsmToken::Identifier)) { 3062 Error(S, "shift operator 'asr' or 'lsl' expected"); 3063 return MatchOperand_ParseFail; 3064 } 3065 StringRef ShiftName = Tok.getString(); 3066 bool isASR; 3067 if (ShiftName == "lsl" || ShiftName == "LSL") 3068 isASR = false; 3069 else if (ShiftName == "asr" || ShiftName == "ASR") 3070 isASR = true; 3071 else { 3072 Error(S, "shift operator 'asr' or 'lsl' expected"); 3073 return MatchOperand_ParseFail; 3074 } 3075 Parser.Lex(); // Eat the operator. 3076 3077 // A '#' and a shift amount. 3078 if (Parser.getTok().isNot(AsmToken::Hash)) { 3079 Error(Parser.getTok().getLoc(), "'#' expected"); 3080 return MatchOperand_ParseFail; 3081 } 3082 Parser.Lex(); // Eat hash token. 3083 3084 const MCExpr *ShiftAmount; 3085 SMLoc E = Parser.getTok().getLoc(); 3086 if (getParser().ParseExpression(ShiftAmount)) { 3087 Error(E, "malformed shift expression"); 3088 return MatchOperand_ParseFail; 3089 } 3090 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3091 if (!CE) { 3092 Error(E, "shift amount must be an immediate"); 3093 return MatchOperand_ParseFail; 3094 } 3095 3096 int64_t Val = CE->getValue(); 3097 if (isASR) { 3098 // Shift amount must be in [1,32] 3099 if (Val < 1 || Val > 32) { 3100 Error(E, "'asr' shift amount must be in range [1,32]"); 3101 return MatchOperand_ParseFail; 3102 } 3103 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
3104 if (isThumb() && Val == 32) { 3105 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 3106 return MatchOperand_ParseFail; 3107 } 3108 if (Val == 32) Val = 0; 3109 } else { 3110 // Shift amount must be in [1,32] 3111 if (Val < 0 || Val > 31) { 3112 Error(E, "'lsr' shift amount must be in range [0,31]"); 3113 return MatchOperand_ParseFail; 3114 } 3115 } 3116 3117 E = Parser.getTok().getLoc(); 3118 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 3119 3120 return MatchOperand_Success; 3121 } 3122 3123 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 3124 /// of instructions. Legal values are: 3125 /// ror #n 'n' in {0, 8, 16, 24} 3126 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3127 parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3128 const AsmToken &Tok = Parser.getTok(); 3129 SMLoc S = Tok.getLoc(); 3130 if (Tok.isNot(AsmToken::Identifier)) 3131 return MatchOperand_NoMatch; 3132 StringRef ShiftName = Tok.getString(); 3133 if (ShiftName != "ror" && ShiftName != "ROR") 3134 return MatchOperand_NoMatch; 3135 Parser.Lex(); // Eat the operator. 3136 3137 // A '#' and a rotate amount. 3138 if (Parser.getTok().isNot(AsmToken::Hash)) { 3139 Error(Parser.getTok().getLoc(), "'#' expected"); 3140 return MatchOperand_ParseFail; 3141 } 3142 Parser.Lex(); // Eat hash token. 3143 3144 const MCExpr *ShiftAmount; 3145 SMLoc E = Parser.getTok().getLoc(); 3146 if (getParser().ParseExpression(ShiftAmount)) { 3147 Error(E, "malformed rotate expression"); 3148 return MatchOperand_ParseFail; 3149 } 3150 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 3151 if (!CE) { 3152 Error(E, "rotate amount must be an immediate"); 3153 return MatchOperand_ParseFail; 3154 } 3155 3156 int64_t Val = CE->getValue(); 3157 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 3158 // normally, zero is represented in asm by omitting the rotate operand 3159 // entirely. 
3160 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 3161 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 3162 return MatchOperand_ParseFail; 3163 } 3164 3165 E = Parser.getTok().getLoc(); 3166 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 3167 3168 return MatchOperand_Success; 3169 } 3170 3171 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3172 parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3173 SMLoc S = Parser.getTok().getLoc(); 3174 // The bitfield descriptor is really two operands, the LSB and the width. 3175 if (Parser.getTok().isNot(AsmToken::Hash)) { 3176 Error(Parser.getTok().getLoc(), "'#' expected"); 3177 return MatchOperand_ParseFail; 3178 } 3179 Parser.Lex(); // Eat hash token. 3180 3181 const MCExpr *LSBExpr; 3182 SMLoc E = Parser.getTok().getLoc(); 3183 if (getParser().ParseExpression(LSBExpr)) { 3184 Error(E, "malformed immediate expression"); 3185 return MatchOperand_ParseFail; 3186 } 3187 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 3188 if (!CE) { 3189 Error(E, "'lsb' operand must be an immediate"); 3190 return MatchOperand_ParseFail; 3191 } 3192 3193 int64_t LSB = CE->getValue(); 3194 // The LSB must be in the range [0,31] 3195 if (LSB < 0 || LSB > 31) { 3196 Error(E, "'lsb' operand must be in the range [0,31]"); 3197 return MatchOperand_ParseFail; 3198 } 3199 E = Parser.getTok().getLoc(); 3200 3201 // Expect another immediate operand. 3202 if (Parser.getTok().isNot(AsmToken::Comma)) { 3203 Error(Parser.getTok().getLoc(), "too few operands"); 3204 return MatchOperand_ParseFail; 3205 } 3206 Parser.Lex(); // Eat hash token. 3207 if (Parser.getTok().isNot(AsmToken::Hash)) { 3208 Error(Parser.getTok().getLoc(), "'#' expected"); 3209 return MatchOperand_ParseFail; 3210 } 3211 Parser.Lex(); // Eat hash token. 
3212 3213 const MCExpr *WidthExpr; 3214 if (getParser().ParseExpression(WidthExpr)) { 3215 Error(E, "malformed immediate expression"); 3216 return MatchOperand_ParseFail; 3217 } 3218 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3219 if (!CE) { 3220 Error(E, "'width' operand must be an immediate"); 3221 return MatchOperand_ParseFail; 3222 } 3223 3224 int64_t Width = CE->getValue(); 3225 // The LSB must be in the range [1,32-lsb] 3226 if (Width < 1 || Width > 32 - LSB) { 3227 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3228 return MatchOperand_ParseFail; 3229 } 3230 E = Parser.getTok().getLoc(); 3231 3232 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3233 3234 return MatchOperand_Success; 3235 } 3236 3237 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3238 parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3239 // Check for a post-index addressing register operand. Specifically: 3240 // postidx_reg := '+' register {, shift} 3241 // | '-' register {, shift} 3242 // | register {, shift} 3243 3244 // This method must return MatchOperand_NoMatch without consuming any tokens 3245 // in the case where there is no match, as other alternatives take other 3246 // parse methods. 3247 AsmToken Tok = Parser.getTok(); 3248 SMLoc S = Tok.getLoc(); 3249 bool haveEaten = false; 3250 bool isAdd = true; 3251 int Reg = -1; 3252 if (Tok.is(AsmToken::Plus)) { 3253 Parser.Lex(); // Eat the '+' token. 3254 haveEaten = true; 3255 } else if (Tok.is(AsmToken::Minus)) { 3256 Parser.Lex(); // Eat the '-' token. 
3257 isAdd = false; 3258 haveEaten = true; 3259 } 3260 if (Parser.getTok().is(AsmToken::Identifier)) 3261 Reg = tryParseRegister(); 3262 if (Reg == -1) { 3263 if (!haveEaten) 3264 return MatchOperand_NoMatch; 3265 Error(Parser.getTok().getLoc(), "register expected"); 3266 return MatchOperand_ParseFail; 3267 } 3268 SMLoc E = Parser.getTok().getLoc(); 3269 3270 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3271 unsigned ShiftImm = 0; 3272 if (Parser.getTok().is(AsmToken::Comma)) { 3273 Parser.Lex(); // Eat the ','. 3274 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3275 return MatchOperand_ParseFail; 3276 } 3277 3278 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3279 ShiftImm, S, E)); 3280 3281 return MatchOperand_Success; 3282 } 3283 3284 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3285 parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3286 // Check for a post-index addressing register operand. Specifically: 3287 // am3offset := '+' register 3288 // | '-' register 3289 // | register 3290 // | # imm 3291 // | # + imm 3292 // | # - imm 3293 3294 // This method must return MatchOperand_NoMatch without consuming any tokens 3295 // in the case where there is no match, as other alternatives take other 3296 // parse methods. 3297 AsmToken Tok = Parser.getTok(); 3298 SMLoc S = Tok.getLoc(); 3299 3300 // Do immediates first, as we always parse those if we have a '#'. 3301 if (Parser.getTok().is(AsmToken::Hash)) { 3302 Parser.Lex(); // Eat the '#'. 3303 // Explicitly look for a '-', as we need to encode negative zero 3304 // differently. 
3305 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3306 const MCExpr *Offset; 3307 if (getParser().ParseExpression(Offset)) 3308 return MatchOperand_ParseFail; 3309 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3310 if (!CE) { 3311 Error(S, "constant expression expected"); 3312 return MatchOperand_ParseFail; 3313 } 3314 SMLoc E = Tok.getLoc(); 3315 // Negative zero is encoded as the flag value INT32_MIN. 3316 int32_t Val = CE->getValue(); 3317 if (isNegative && Val == 0) 3318 Val = INT32_MIN; 3319 3320 Operands.push_back( 3321 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3322 3323 return MatchOperand_Success; 3324 } 3325 3326 3327 bool haveEaten = false; 3328 bool isAdd = true; 3329 int Reg = -1; 3330 if (Tok.is(AsmToken::Plus)) { 3331 Parser.Lex(); // Eat the '+' token. 3332 haveEaten = true; 3333 } else if (Tok.is(AsmToken::Minus)) { 3334 Parser.Lex(); // Eat the '-' token. 3335 isAdd = false; 3336 haveEaten = true; 3337 } 3338 if (Parser.getTok().is(AsmToken::Identifier)) 3339 Reg = tryParseRegister(); 3340 if (Reg == -1) { 3341 if (!haveEaten) 3342 return MatchOperand_NoMatch; 3343 Error(Parser.getTok().getLoc(), "register expected"); 3344 return MatchOperand_ParseFail; 3345 } 3346 SMLoc E = Parser.getTok().getLoc(); 3347 3348 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3349 0, S, E)); 3350 3351 return MatchOperand_Success; 3352 } 3353 3354 /// cvtT2LdrdPre - Convert parsed operands to MCInst. 3355 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3356 /// when they refer multiple MIOperands inside a single one. 3357 bool ARMAsmParser:: 3358 cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3359 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3360 // Rt, Rt2 3361 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3362 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3363 // Create a writeback register dummy placeholder. 
3364 Inst.addOperand(MCOperand::CreateReg(0)); 3365 // addr 3366 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3367 // pred 3368 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3369 return true; 3370 } 3371 3372 /// cvtT2StrdPre - Convert parsed operands to MCInst. 3373 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3374 /// when they refer multiple MIOperands inside a single one. 3375 bool ARMAsmParser:: 3376 cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3377 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3378 // Create a writeback register dummy placeholder. 3379 Inst.addOperand(MCOperand::CreateReg(0)); 3380 // Rt, Rt2 3381 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3382 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3383 // addr 3384 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3385 // pred 3386 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3387 return true; 3388 } 3389 3390 /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3391 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3392 /// when they refer multiple MIOperands inside a single one. 3393 bool ARMAsmParser:: 3394 cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3395 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3396 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3397 3398 // Create a writeback register dummy placeholder. 3399 Inst.addOperand(MCOperand::CreateImm(0)); 3400 3401 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3402 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3403 return true; 3404 } 3405 3406 /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3407 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3408 /// when they refer multiple MIOperands inside a single one. 
3409 bool ARMAsmParser:: 3410 cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3411 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3412 // Create a writeback register dummy placeholder. 3413 Inst.addOperand(MCOperand::CreateImm(0)); 3414 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3415 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3416 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3417 return true; 3418 } 3419 3420 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3421 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3422 /// when they refer multiple MIOperands inside a single one. 3423 bool ARMAsmParser:: 3424 cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3425 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3426 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3427 3428 // Create a writeback register dummy placeholder. 3429 Inst.addOperand(MCOperand::CreateImm(0)); 3430 3431 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3432 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3433 return true; 3434 } 3435 3436 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3437 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3438 /// when they refer multiple MIOperands inside a single one. 3439 bool ARMAsmParser:: 3440 cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3441 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3442 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3443 3444 // Create a writeback register dummy placeholder. 3445 Inst.addOperand(MCOperand::CreateImm(0)); 3446 3447 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3448 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3449 return true; 3450 } 3451 3452 3453 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3454 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3455 /// when they refer multiple MIOperands inside a single one. 3456 bool ARMAsmParser:: 3457 cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3458 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3459 // Create a writeback register dummy placeholder. 3460 Inst.addOperand(MCOperand::CreateImm(0)); 3461 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3462 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3463 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3464 return true; 3465 } 3466 3467 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3468 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3469 /// when they refer multiple MIOperands inside a single one. 3470 bool ARMAsmParser:: 3471 cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3472 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3473 // Create a writeback register dummy placeholder. 3474 Inst.addOperand(MCOperand::CreateImm(0)); 3475 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3476 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3477 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3478 return true; 3479 } 3480 3481 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3482 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3483 /// when they refer multiple MIOperands inside a single one. 3484 bool ARMAsmParser:: 3485 cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3486 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3487 // Create a writeback register dummy placeholder. 
3488 Inst.addOperand(MCOperand::CreateImm(0)); 3489 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3490 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3491 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3492 return true; 3493 } 3494 3495 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3496 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3497 /// when they refer multiple MIOperands inside a single one. 3498 bool ARMAsmParser:: 3499 cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3500 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3501 // Rt 3502 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3503 // Create a writeback register dummy placeholder. 3504 Inst.addOperand(MCOperand::CreateImm(0)); 3505 // addr 3506 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3507 // offset 3508 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3509 // pred 3510 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3511 return true; 3512 } 3513 3514 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3515 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3516 /// when they refer multiple MIOperands inside a single one. 3517 bool ARMAsmParser:: 3518 cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3519 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3520 // Rt 3521 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3522 // Create a writeback register dummy placeholder. 3523 Inst.addOperand(MCOperand::CreateImm(0)); 3524 // addr 3525 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3526 // offset 3527 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3528 // pred 3529 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3530 return true; 3531 } 3532 3533 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3534 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3535 /// when they refer multiple MIOperands inside a single one. 3536 bool ARMAsmParser:: 3537 cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3538 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3539 // Create a writeback register dummy placeholder. 3540 Inst.addOperand(MCOperand::CreateImm(0)); 3541 // Rt 3542 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3543 // addr 3544 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3545 // offset 3546 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3547 // pred 3548 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3549 return true; 3550 } 3551 3552 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3553 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3554 /// when they refer multiple MIOperands inside a single one. 3555 bool ARMAsmParser:: 3556 cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3557 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3558 // Create a writeback register dummy placeholder. 3559 Inst.addOperand(MCOperand::CreateImm(0)); 3560 // Rt 3561 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3562 // addr 3563 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3564 // offset 3565 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3566 // pred 3567 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3568 return true; 3569 } 3570 3571 /// cvtLdrdPre - Convert parsed operands to MCInst. 3572 /// Needed here because the Asm Gen Matcher can't handle properly tied operands 3573 /// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3: base reg + offset reg + imm, three MI operands)
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder. Unlike the LDRD form,
  // the writeback result comes first for stores.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. In the three-operand form, error out if neither source matches
  // the destination. Returning false tells the matcher the conversion failed.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
      ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: re-add the destination register (MI operand 0).
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

/// cvtVLDwbFixed - Convert a post-indexed VLD (fixed increment, no Rm)
/// to an MCInst: Vd list, writeback dummy, aligned address, predicate.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVLDwbRegister - Same as cvtVLDwbFixed but with a register
/// increment (Rm) after the address operand.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbFixed - Store counterpart of cvtVLDwbFixed; note the vector
/// list comes after the address for stores.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVSTwbRegister - Store counterpart of cvtVLDwbRegister.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
The first token must be a '[' when called. 3716 bool ARMAsmParser:: 3717 parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3718 SMLoc S, E; 3719 assert(Parser.getTok().is(AsmToken::LBrac) && 3720 "Token is not a Left Bracket"); 3721 S = Parser.getTok().getLoc(); 3722 Parser.Lex(); // Eat left bracket token. 3723 3724 const AsmToken &BaseRegTok = Parser.getTok(); 3725 int BaseRegNum = tryParseRegister(); 3726 if (BaseRegNum == -1) 3727 return Error(BaseRegTok.getLoc(), "register expected"); 3728 3729 // The next token must either be a comma or a closing bracket. 3730 const AsmToken &Tok = Parser.getTok(); 3731 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3732 return Error(Tok.getLoc(), "malformed memory operand"); 3733 3734 if (Tok.is(AsmToken::RBrac)) { 3735 E = Tok.getLoc(); 3736 Parser.Lex(); // Eat right bracket token. 3737 3738 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3739 0, 0, false, S, E)); 3740 3741 // If there's a pre-indexing writeback marker, '!', just add it as a token 3742 // operand. It's rather odd, but syntactically valid. 3743 if (Parser.getTok().is(AsmToken::Exclaim)) { 3744 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3745 Parser.Lex(); // Eat the '!'. 3746 } 3747 3748 return false; 3749 } 3750 3751 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3752 Parser.Lex(); // Eat the comma. 3753 3754 // If we have a ':', it's an alignment specifier. 3755 if (Parser.getTok().is(AsmToken::Colon)) { 3756 Parser.Lex(); // Eat the ':'. 3757 E = Parser.getTok().getLoc(); 3758 3759 const MCExpr *Expr; 3760 if (getParser().ParseExpression(Expr)) 3761 return true; 3762 3763 // The expression has to be a constant. Memory references with relocations 3764 // don't come through here, as they use the <label> forms of the relevant 3765 // instructions. 
3766 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3767 if (!CE) 3768 return Error (E, "constant expression expected"); 3769 3770 unsigned Align = 0; 3771 switch (CE->getValue()) { 3772 default: 3773 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3774 case 64: Align = 8; break; 3775 case 128: Align = 16; break; 3776 case 256: Align = 32; break; 3777 } 3778 3779 // Now we should have the closing ']' 3780 E = Parser.getTok().getLoc(); 3781 if (Parser.getTok().isNot(AsmToken::RBrac)) 3782 return Error(E, "']' expected"); 3783 Parser.Lex(); // Eat right bracket token. 3784 3785 // Don't worry about range checking the value here. That's handled by 3786 // the is*() predicates. 3787 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3788 ARM_AM::no_shift, 0, Align, 3789 false, S, E)); 3790 3791 // If there's a pre-indexing writeback marker, '!', just add it as a token 3792 // operand. 3793 if (Parser.getTok().is(AsmToken::Exclaim)) { 3794 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3795 Parser.Lex(); // Eat the '!'. 3796 } 3797 3798 return false; 3799 } 3800 3801 // If we have a '#', it's an immediate offset, else assume it's a register 3802 // offset. Be friendly and also accept a plain integer (without a leading 3803 // hash) for gas compatibility. 3804 if (Parser.getTok().is(AsmToken::Hash) || 3805 Parser.getTok().is(AsmToken::Integer)) { 3806 if (Parser.getTok().is(AsmToken::Hash)) 3807 Parser.Lex(); // Eat the '#'. 3808 E = Parser.getTok().getLoc(); 3809 3810 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3811 const MCExpr *Offset; 3812 if (getParser().ParseExpression(Offset)) 3813 return true; 3814 3815 // The expression has to be a constant. Memory references with relocations 3816 // don't come through here, as they use the <label> forms of the relevant 3817 // instructions. 
3818 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3819 if (!CE) 3820 return Error (E, "constant expression expected"); 3821 3822 // If the constant was #-0, represent it as INT32_MIN. 3823 int32_t Val = CE->getValue(); 3824 if (isNegative && Val == 0) 3825 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3826 3827 // Now we should have the closing ']' 3828 E = Parser.getTok().getLoc(); 3829 if (Parser.getTok().isNot(AsmToken::RBrac)) 3830 return Error(E, "']' expected"); 3831 Parser.Lex(); // Eat right bracket token. 3832 3833 // Don't worry about range checking the value here. That's handled by 3834 // the is*() predicates. 3835 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3836 ARM_AM::no_shift, 0, 0, 3837 false, S, E)); 3838 3839 // If there's a pre-indexing writeback marker, '!', just add it as a token 3840 // operand. 3841 if (Parser.getTok().is(AsmToken::Exclaim)) { 3842 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3843 Parser.Lex(); // Eat the '!'. 3844 } 3845 3846 return false; 3847 } 3848 3849 // The register offset is optionally preceded by a '+' or '-' 3850 bool isNegative = false; 3851 if (Parser.getTok().is(AsmToken::Minus)) { 3852 isNegative = true; 3853 Parser.Lex(); // Eat the '-'. 3854 } else if (Parser.getTok().is(AsmToken::Plus)) { 3855 // Nothing to do. 3856 Parser.Lex(); // Eat the '+'. 3857 } 3858 3859 E = Parser.getTok().getLoc(); 3860 int OffsetRegNum = tryParseRegister(); 3861 if (OffsetRegNum == -1) 3862 return Error(E, "register expected"); 3863 3864 // If there's a shift operator, handle it. 3865 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3866 unsigned ShiftImm = 0; 3867 if (Parser.getTok().is(AsmToken::Comma)) { 3868 Parser.Lex(); // Eat the ','. 
3869 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3870 return true; 3871 } 3872 3873 // Now we should have the closing ']' 3874 E = Parser.getTok().getLoc(); 3875 if (Parser.getTok().isNot(AsmToken::RBrac)) 3876 return Error(E, "']' expected"); 3877 Parser.Lex(); // Eat right bracket token. 3878 3879 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3880 ShiftType, ShiftImm, 0, isNegative, 3881 S, E)); 3882 3883 // If there's a pre-indexing writeback marker, '!', just add it as a token 3884 // operand. 3885 if (Parser.getTok().is(AsmToken::Exclaim)) { 3886 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3887 Parser.Lex(); // Eat the '!'. 3888 } 3889 3890 return false; 3891 } 3892 3893 /// parseMemRegOffsetShift - one of these two: 3894 /// ( lsl | lsr | asr | ror ) , # shift_amount 3895 /// rrx 3896 /// return true if it parses a shift otherwise it returns false. 3897 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3898 unsigned &Amount) { 3899 SMLoc Loc = Parser.getTok().getLoc(); 3900 const AsmToken &Tok = Parser.getTok(); 3901 if (Tok.isNot(AsmToken::Identifier)) 3902 return true; 3903 StringRef ShiftName = Tok.getString(); 3904 if (ShiftName == "lsl" || ShiftName == "LSL") 3905 St = ARM_AM::lsl; 3906 else if (ShiftName == "lsr" || ShiftName == "LSR") 3907 St = ARM_AM::lsr; 3908 else if (ShiftName == "asr" || ShiftName == "ASR") 3909 St = ARM_AM::asr; 3910 else if (ShiftName == "ror" || ShiftName == "ROR") 3911 St = ARM_AM::ror; 3912 else if (ShiftName == "rrx" || ShiftName == "RRX") 3913 St = ARM_AM::rrx; 3914 else 3915 return Error(Loc, "illegal shift operator"); 3916 Parser.Lex(); // Eat shift type token. 3917 3918 // rrx stands alone. 3919 Amount = 0; 3920 if (St != ARM_AM::rrx) { 3921 Loc = Parser.getTok().getLoc(); 3922 // A '#' and a shift amount. 
3923 const AsmToken &HashTok = Parser.getTok(); 3924 if (HashTok.isNot(AsmToken::Hash)) 3925 return Error(HashTok.getLoc(), "'#' expected"); 3926 Parser.Lex(); // Eat hash token. 3927 3928 const MCExpr *Expr; 3929 if (getParser().ParseExpression(Expr)) 3930 return true; 3931 // Range check the immediate. 3932 // lsl, ror: 0 <= imm <= 31 3933 // lsr, asr: 0 <= imm <= 32 3934 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3935 if (!CE) 3936 return Error(Loc, "shift amount must be an immediate"); 3937 int64_t Imm = CE->getValue(); 3938 if (Imm < 0 || 3939 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3940 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3941 return Error(Loc, "immediate shift value out of range"); 3942 Amount = Imm; 3943 } 3944 3945 return false; 3946 } 3947 3948 /// parseFPImm - A floating point immediate expression operand. 3949 ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3950 parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3951 SMLoc S = Parser.getTok().getLoc(); 3952 3953 if (Parser.getTok().isNot(AsmToken::Hash)) 3954 return MatchOperand_NoMatch; 3955 3956 // Disambiguate the VMOV forms that can accept an FP immediate. 3957 // vmov.f32 <sreg>, #imm 3958 // vmov.f64 <dreg>, #imm 3959 // vmov.f32 <dreg>, #imm @ vector f32x2 3960 // vmov.f32 <qreg>, #imm @ vector f32x4 3961 // 3962 // There are also the NEON VMOV instructions which expect an 3963 // integer constant. Make sure we don't try to parse an FPImm 3964 // for these: 3965 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3966 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3967 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3968 TyOp->getToken() != ".f64")) 3969 return MatchOperand_NoMatch; 3970 3971 Parser.Lex(); // Eat the '#'. 3972 3973 // Handle negation, as that still comes through as a separate token. 
3974 bool isNegative = false; 3975 if (Parser.getTok().is(AsmToken::Minus)) { 3976 isNegative = true; 3977 Parser.Lex(); 3978 } 3979 const AsmToken &Tok = Parser.getTok(); 3980 if (Tok.is(AsmToken::Real)) { 3981 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3982 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3983 // If we had a '-' in front, toggle the sign bit. 3984 IntVal ^= (uint64_t)isNegative << 63; 3985 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3986 Parser.Lex(); // Eat the token. 3987 if (Val == -1) { 3988 TokError("floating point value out of range"); 3989 return MatchOperand_ParseFail; 3990 } 3991 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3992 return MatchOperand_Success; 3993 } 3994 if (Tok.is(AsmToken::Integer)) { 3995 int64_t Val = Tok.getIntVal(); 3996 Parser.Lex(); // Eat the token. 3997 if (Val > 255 || Val < 0) { 3998 TokError("encoded floating point value out of range"); 3999 return MatchOperand_ParseFail; 4000 } 4001 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 4002 return MatchOperand_Success; 4003 } 4004 4005 TokError("invalid floating point immediate"); 4006 return MatchOperand_ParseFail; 4007 } 4008 /// Parse a arm instruction operand. For now this parses the operand regardless 4009 /// of the mnemonic. 4010 bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4011 StringRef Mnemonic) { 4012 SMLoc S, E; 4013 4014 // Check if the current operand has a custom associated parser, if so, try to 4015 // custom parse the operand, or fallback to the general approach. 4016 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 4017 if (ResTy == MatchOperand_Success) 4018 return false; 4019 // If there wasn't a custom match, try the generic matcher below. Otherwise, 4020 // there was a match, but an error occurred, in which case, just return that 4021 // the operand parsing failed. 
4022 if (ResTy == MatchOperand_ParseFail) 4023 return true; 4024 4025 switch (getLexer().getKind()) { 4026 default: 4027 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 4028 return true; 4029 case AsmToken::Identifier: { 4030 // If this is VMRS, check for the apsr_nzcv operand. 4031 if (!tryParseRegisterWithWriteBack(Operands)) 4032 return false; 4033 int Res = tryParseShiftRegister(Operands); 4034 if (Res == 0) // success 4035 return false; 4036 else if (Res == -1) // irrecoverable error 4037 return true; 4038 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 4039 S = Parser.getTok().getLoc(); 4040 Parser.Lex(); 4041 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 4042 return false; 4043 } 4044 4045 // Fall though for the Identifier case that is not a register or a 4046 // special name. 4047 } 4048 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 4049 case AsmToken::Integer: // things like 1f and 2b as a branch targets 4050 case AsmToken::String: // quoted label names. 4051 case AsmToken::Dot: { // . as a branch target 4052 // This was not a register so parse other operands that start with an 4053 // identifier (like labels) as expressions and create them as immediates. 4054 const MCExpr *IdVal; 4055 S = Parser.getTok().getLoc(); 4056 if (getParser().ParseExpression(IdVal)) 4057 return true; 4058 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4059 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 4060 return false; 4061 } 4062 case AsmToken::LBrac: 4063 return parseMemory(Operands); 4064 case AsmToken::LCurly: 4065 return parseRegisterList(Operands); 4066 case AsmToken::Hash: { 4067 // #42 -> immediate. 
4068 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 4069 S = Parser.getTok().getLoc(); 4070 Parser.Lex(); 4071 bool isNegative = Parser.getTok().is(AsmToken::Minus); 4072 const MCExpr *ImmVal; 4073 if (getParser().ParseExpression(ImmVal)) 4074 return true; 4075 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 4076 if (CE) { 4077 int32_t Val = CE->getValue(); 4078 if (isNegative && Val == 0) 4079 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 4080 } 4081 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4082 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 4083 return false; 4084 } 4085 case AsmToken::Colon: { 4086 // ":lower16:" and ":upper16:" expression prefixes 4087 // FIXME: Check it's an expression prefix, 4088 // e.g. (FOO - :lower16:BAR) isn't legal. 4089 ARMMCExpr::VariantKind RefKind; 4090 if (parsePrefix(RefKind)) 4091 return true; 4092 4093 const MCExpr *SubExprVal; 4094 if (getParser().ParseExpression(SubExprVal)) 4095 return true; 4096 4097 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 4098 getContext()); 4099 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 4100 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 4101 return false; 4102 } 4103 } 4104 } 4105 4106 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 4107 // :lower16: and :upper16:. 
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // (StringRef::substr clamps an out-of-range start offset, so mnemonics
  // shorter than two characters harmlessly yield an empty suffix here.)
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                      bool &CanAcceptPredicationCode) {
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  // NOTE(review): "setend" appears twice in this condition (first and fifth
  // lines); the second occurrence is redundant.
  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
      (Mnemonic == "clrex" && !isThumb()) ||
      (Mnemonic == "nop" && isThumbOne()) ||
      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
       !isThumb()) ||
      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
    CanAcceptPredicationCode = false;
  } else
    CanAcceptPredicationCode = true;

  // Some mnemonics are unpredicated only in Thumb mode.
  if (isThumb()) {
    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
      CanAcceptPredicationCode = false;
  }
}

/// shouldOmitCCOutOperand - Decide whether the already-parsed cc_out operand
/// should be dropped before matching, based on which encoding the operands
/// select. Returns true when the operand must be omitted.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

/// isDataTypeToken - Return true if Tok is one of the NEON/VFP data-type
/// suffix tokens (".8", ".i32", ".f64", etc.).
static bool isDataTypeToken(StringRef Tok) {
  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
    Tok == ".f" || Tok == ".d";
}

// FIXME: This bit should probably be handled via an explicit match class
// in the .td files that matches the suffix instead of having it be
// a literal string token the way it is now.
// Return true if the given mnemonic accepts and completely ignores a
// data-type suffix (currently the vldm/vstm families). The DT argument
// is presently unused.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask string right-to-left, shifting in a '1' for 't' and
    // a '0' for 'e'. Any other character is a diagnostic.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
          NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // ".n" (narrow) suffixes are dropped; everything else becomes a token
    // operand for the matcher.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
4634 static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4635 unsigned HiReg, bool &containsReg) { 4636 containsReg = false; 4637 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4638 unsigned OpReg = Inst.getOperand(i).getReg(); 4639 if (OpReg == Reg) 4640 containsReg = true; 4641 // Anything other than a low register isn't legal here. 4642 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4643 return true; 4644 } 4645 return false; 4646 } 4647 4648 // Check if the specified regisgter is in the register list of the inst, 4649 // starting at the indicated operand number. 4650 static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4651 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4652 unsigned OpReg = Inst.getOperand(i).getReg(); 4653 if (OpReg == Reg) 4654 return true; 4655 } 4656 return false; 4657 } 4658 4659 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4660 // the ARMInsts array) instead. Getting that here requires awkward 4661 // API changes, though. Better way? 4662 namespace llvm { 4663 extern const MCInstrDesc ARMInsts[]; 4664 } 4665 static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4666 return ARMInsts[Opcode]; 4667 } 4668 4669 // FIXME: We would really like to be able to tablegen'erate this. 4670 bool ARMAsmParser:: 4671 validateInstruction(MCInst &Inst, 4672 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4673 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4674 SMLoc Loc = Operands[0]->getStartLoc(); 4675 // Check the IT block state first. 4676 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4677 // being allowed in IT blocks, but not being predicable. It just always 4678 // executes. 
4679 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4680 unsigned bit = 1; 4681 if (ITState.FirstCond) 4682 ITState.FirstCond = false; 4683 else 4684 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4685 // The instruction must be predicable. 4686 if (!MCID.isPredicable()) 4687 return Error(Loc, "instructions in IT block must be predicable"); 4688 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4689 unsigned ITCond = bit ? ITState.Cond : 4690 ARMCC::getOppositeCondition(ITState.Cond); 4691 if (Cond != ITCond) { 4692 // Find the condition code Operand to get its SMLoc information. 4693 SMLoc CondLoc; 4694 for (unsigned i = 1; i < Operands.size(); ++i) 4695 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4696 CondLoc = Operands[i]->getStartLoc(); 4697 return Error(CondLoc, "incorrect condition in IT block; got '" + 4698 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4699 "', but expected '" + 4700 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4701 } 4702 // Check for non-'al' condition codes outside of the IT block. 4703 } else if (isThumbTwo() && MCID.isPredicable() && 4704 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4705 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4706 Inst.getOpcode() != ARM::t2B) 4707 return Error(Loc, "predicated instructions must be in IT block"); 4708 4709 switch (Inst.getOpcode()) { 4710 case ARM::LDRD: 4711 case ARM::LDRD_PRE: 4712 case ARM::LDRD_POST: 4713 case ARM::LDREXD: { 4714 // Rt2 must be Rt + 1. 4715 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4716 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4717 if (Rt2 != Rt + 1) 4718 return Error(Operands[3]->getStartLoc(), 4719 "destination operands must be sequential"); 4720 return false; 4721 } 4722 case ARM::STRD: { 4723 // Rt2 must be Rt + 1. 
4724 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4725 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4726 if (Rt2 != Rt + 1) 4727 return Error(Operands[3]->getStartLoc(), 4728 "source operands must be sequential"); 4729 return false; 4730 } 4731 case ARM::STRD_PRE: 4732 case ARM::STRD_POST: 4733 case ARM::STREXD: { 4734 // Rt2 must be Rt + 1. 4735 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4736 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4737 if (Rt2 != Rt + 1) 4738 return Error(Operands[3]->getStartLoc(), 4739 "source operands must be sequential"); 4740 return false; 4741 } 4742 case ARM::SBFX: 4743 case ARM::UBFX: { 4744 // width must be in range [1, 32-lsb] 4745 unsigned lsb = Inst.getOperand(2).getImm(); 4746 unsigned widthm1 = Inst.getOperand(3).getImm(); 4747 if (widthm1 >= 32 - lsb) 4748 return Error(Operands[5]->getStartLoc(), 4749 "bitfield width must be in range [1,32-lsb]"); 4750 return false; 4751 } 4752 case ARM::tLDMIA: { 4753 // If we're parsing Thumb2, the .w variant is available and handles 4754 // most cases that are normally illegal for a Thumb1 LDM 4755 // instruction. We'll make the transformation in processInstruction() 4756 // if necessary. 4757 // 4758 // Thumb LDM instructions are writeback iff the base register is not 4759 // in the register list. 4760 unsigned Rn = Inst.getOperand(0).getReg(); 4761 bool hasWritebackToken = 4762 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4763 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4764 bool listContainsBase; 4765 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4766 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4767 "registers must be in range r0-r7"); 4768 // If we should have writeback, then there should be a '!' token. 
4769 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4770 return Error(Operands[2]->getStartLoc(), 4771 "writeback operator '!' expected"); 4772 // If we should not have writeback, there must not be a '!'. This is 4773 // true even for the 32-bit wide encodings. 4774 if (listContainsBase && hasWritebackToken) 4775 return Error(Operands[3]->getStartLoc(), 4776 "writeback operator '!' not allowed when base register " 4777 "in register list"); 4778 4779 break; 4780 } 4781 case ARM::t2LDMIA_UPD: { 4782 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4783 return Error(Operands[4]->getStartLoc(), 4784 "writeback operator '!' not allowed when base register " 4785 "in register list"); 4786 break; 4787 } 4788 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 4789 // so only issue a diagnostic for thumb1. The instructions will be 4790 // switched to the t2 encodings in processInstruction() if necessary. 4791 case ARM::tPOP: { 4792 bool listContainsBase; 4793 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) && 4794 !isThumbTwo()) 4795 return Error(Operands[2]->getStartLoc(), 4796 "registers must be in range r0-r7 or pc"); 4797 break; 4798 } 4799 case ARM::tPUSH: { 4800 bool listContainsBase; 4801 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) && 4802 !isThumbTwo()) 4803 return Error(Operands[2]->getStartLoc(), 4804 "registers must be in range r0-r7 or lr"); 4805 break; 4806 } 4807 case ARM::tSTMIA_UPD: { 4808 bool listContainsBase; 4809 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4810 return Error(Operands[4]->getStartLoc(), 4811 "registers must be in range r0-r7"); 4812 break; 4813 } 4814 } 4815 4816 return false; 4817 } 4818 4819 static unsigned getRealVSTLNOpcode(unsigned Opc) { 4820 switch(Opc) { 4821 default: assert(0 && "unexpected opcode!"); 4822 case ARM::VST1LNdWB_fixed_Asm_8: return ARM::VST1LNd8_UPD; 4823 case ARM::VST1LNdWB_fixed_Asm_P8: return 
ARM::VST1LNd8_UPD; 4824 case ARM::VST1LNdWB_fixed_Asm_I8: return ARM::VST1LNd8_UPD; 4825 case ARM::VST1LNdWB_fixed_Asm_S8: return ARM::VST1LNd8_UPD; 4826 case ARM::VST1LNdWB_fixed_Asm_U8: return ARM::VST1LNd8_UPD; 4827 case ARM::VST1LNdWB_fixed_Asm_16: return ARM::VST1LNd16_UPD; 4828 case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD; 4829 case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD; 4830 case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD; 4831 case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD; 4832 case ARM::VST1LNdWB_fixed_Asm_32: return ARM::VST1LNd32_UPD; 4833 case ARM::VST1LNdWB_fixed_Asm_F: return ARM::VST1LNd32_UPD; 4834 case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD; 4835 case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD; 4836 case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD; 4837 case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD; 4838 case ARM::VST1LNdWB_register_Asm_8: return ARM::VST1LNd8_UPD; 4839 case ARM::VST1LNdWB_register_Asm_P8: return ARM::VST1LNd8_UPD; 4840 case ARM::VST1LNdWB_register_Asm_I8: return ARM::VST1LNd8_UPD; 4841 case ARM::VST1LNdWB_register_Asm_S8: return ARM::VST1LNd8_UPD; 4842 case ARM::VST1LNdWB_register_Asm_U8: return ARM::VST1LNd8_UPD; 4843 case ARM::VST1LNdWB_register_Asm_16: return ARM::VST1LNd16_UPD; 4844 case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD; 4845 case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD; 4846 case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD; 4847 case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD; 4848 case ARM::VST1LNdWB_register_Asm_32: return ARM::VST1LNd32_UPD; 4849 case ARM::VST1LNdWB_register_Asm_F: return ARM::VST1LNd32_UPD; 4850 case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD; 4851 case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD; 4852 case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD; 4853 case 
ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD; 4854 case ARM::VST1LNdAsm_8: return ARM::VST1LNd8; 4855 case ARM::VST1LNdAsm_P8: return ARM::VST1LNd8; 4856 case ARM::VST1LNdAsm_I8: return ARM::VST1LNd8; 4857 case ARM::VST1LNdAsm_S8: return ARM::VST1LNd8; 4858 case ARM::VST1LNdAsm_U8: return ARM::VST1LNd8; 4859 case ARM::VST1LNdAsm_16: return ARM::VST1LNd16; 4860 case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16; 4861 case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16; 4862 case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16; 4863 case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16; 4864 case ARM::VST1LNdAsm_32: return ARM::VST1LNd32; 4865 case ARM::VST1LNdAsm_F: return ARM::VST1LNd32; 4866 case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32; 4867 case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32; 4868 case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32; 4869 case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32; 4870 } 4871 } 4872 4873 static unsigned getRealVLDLNOpcode(unsigned Opc) { 4874 switch(Opc) { 4875 default: assert(0 && "unexpected opcode!"); 4876 case ARM::VLD1LNdWB_fixed_Asm_8: return ARM::VLD1LNd8_UPD; 4877 case ARM::VLD1LNdWB_fixed_Asm_P8: return ARM::VLD1LNd8_UPD; 4878 case ARM::VLD1LNdWB_fixed_Asm_I8: return ARM::VLD1LNd8_UPD; 4879 case ARM::VLD1LNdWB_fixed_Asm_S8: return ARM::VLD1LNd8_UPD; 4880 case ARM::VLD1LNdWB_fixed_Asm_U8: return ARM::VLD1LNd8_UPD; 4881 case ARM::VLD1LNdWB_fixed_Asm_16: return ARM::VLD1LNd16_UPD; 4882 case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD; 4883 case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD; 4884 case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD; 4885 case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD; 4886 case ARM::VLD1LNdWB_fixed_Asm_32: return ARM::VLD1LNd32_UPD; 4887 case ARM::VLD1LNdWB_fixed_Asm_F: return ARM::VLD1LNd32_UPD; 4888 case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD; 4889 case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD; 4890 case 
ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD; 4891 case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD; 4892 case ARM::VLD1LNdWB_register_Asm_8: return ARM::VLD1LNd8_UPD; 4893 case ARM::VLD1LNdWB_register_Asm_P8: return ARM::VLD1LNd8_UPD; 4894 case ARM::VLD1LNdWB_register_Asm_I8: return ARM::VLD1LNd8_UPD; 4895 case ARM::VLD1LNdWB_register_Asm_S8: return ARM::VLD1LNd8_UPD; 4896 case ARM::VLD1LNdWB_register_Asm_U8: return ARM::VLD1LNd8_UPD; 4897 case ARM::VLD1LNdWB_register_Asm_16: return ARM::VLD1LNd16_UPD; 4898 case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD; 4899 case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD; 4900 case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD; 4901 case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD; 4902 case ARM::VLD1LNdWB_register_Asm_32: return ARM::VLD1LNd32_UPD; 4903 case ARM::VLD1LNdWB_register_Asm_F: return ARM::VLD1LNd32_UPD; 4904 case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD; 4905 case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD; 4906 case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD; 4907 case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD; 4908 case ARM::VLD1LNdAsm_8: return ARM::VLD1LNd8; 4909 case ARM::VLD1LNdAsm_P8: return ARM::VLD1LNd8; 4910 case ARM::VLD1LNdAsm_I8: return ARM::VLD1LNd8; 4911 case ARM::VLD1LNdAsm_S8: return ARM::VLD1LNd8; 4912 case ARM::VLD1LNdAsm_U8: return ARM::VLD1LNd8; 4913 case ARM::VLD1LNdAsm_16: return ARM::VLD1LNd16; 4914 case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16; 4915 case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16; 4916 case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16; 4917 case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16; 4918 case ARM::VLD1LNdAsm_32: return ARM::VLD1LNd32; 4919 case ARM::VLD1LNdAsm_F: return ARM::VLD1LNd32; 4920 case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32; 4921 case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32; 4922 case 
       ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
  case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
  }
}

// Rewrite matched pseudo/alias instructions into their canonical MCInst
// forms (real opcodes, operands reordered). The rewritten cases return
// true to indicate Inst was replaced.
bool ARMAsmParser::
processInstruction(MCInst &Inst,
                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  switch (Inst.getOpcode()) {
  // Handle NEON VST1 complex aliases.
  case ARM::VST1LNdWB_register_Asm_8:
  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8:
  case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
  case ARM::VST1LNdWB_register_Asm_16:
  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16:
  case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
  case ARM::VST1LNdWB_register_Asm_32:
  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32:
  case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32:
  case ARM::VST1LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
4951 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 4952 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 4953 TmpInst.addOperand(Inst.getOperand(2)); // Rn 4954 TmpInst.addOperand(Inst.getOperand(3)); // alignment 4955 TmpInst.addOperand(Inst.getOperand(4)); // Rm 4956 TmpInst.addOperand(Inst.getOperand(0)); // Vd 4957 TmpInst.addOperand(Inst.getOperand(1)); // lane 4958 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 4959 TmpInst.addOperand(Inst.getOperand(6)); 4960 Inst = TmpInst; 4961 return true; 4962 } 4963 case ARM::VST1LNdWB_fixed_Asm_8: 4964 case ARM::VST1LNdWB_fixed_Asm_P8: 4965 case ARM::VST1LNdWB_fixed_Asm_I8: 4966 case ARM::VST1LNdWB_fixed_Asm_S8: 4967 case ARM::VST1LNdWB_fixed_Asm_U8: 4968 case ARM::VST1LNdWB_fixed_Asm_16: 4969 case ARM::VST1LNdWB_fixed_Asm_P16: 4970 case ARM::VST1LNdWB_fixed_Asm_I16: 4971 case ARM::VST1LNdWB_fixed_Asm_S16: 4972 case ARM::VST1LNdWB_fixed_Asm_U16: 4973 case ARM::VST1LNdWB_fixed_Asm_32: 4974 case ARM::VST1LNdWB_fixed_Asm_F: 4975 case ARM::VST1LNdWB_fixed_Asm_F32: 4976 case ARM::VST1LNdWB_fixed_Asm_I32: 4977 case ARM::VST1LNdWB_fixed_Asm_S32: 4978 case ARM::VST1LNdWB_fixed_Asm_U32: { 4979 MCInst TmpInst; 4980 // Shuffle the operands around so the lane index operand is in the 4981 // right place. 
4982 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 4983 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 4984 TmpInst.addOperand(Inst.getOperand(2)); // Rn 4985 TmpInst.addOperand(Inst.getOperand(3)); // alignment 4986 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 4987 TmpInst.addOperand(Inst.getOperand(0)); // Vd 4988 TmpInst.addOperand(Inst.getOperand(1)); // lane 4989 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 4990 TmpInst.addOperand(Inst.getOperand(5)); 4991 Inst = TmpInst; 4992 return true; 4993 } 4994 case ARM::VST1LNdAsm_8: 4995 case ARM::VST1LNdAsm_P8: 4996 case ARM::VST1LNdAsm_I8: 4997 case ARM::VST1LNdAsm_S8: 4998 case ARM::VST1LNdAsm_U8: 4999 case ARM::VST1LNdAsm_16: 5000 case ARM::VST1LNdAsm_P16: 5001 case ARM::VST1LNdAsm_I16: 5002 case ARM::VST1LNdAsm_S16: 5003 case ARM::VST1LNdAsm_U16: 5004 case ARM::VST1LNdAsm_32: 5005 case ARM::VST1LNdAsm_F: 5006 case ARM::VST1LNdAsm_F32: 5007 case ARM::VST1LNdAsm_I32: 5008 case ARM::VST1LNdAsm_S32: 5009 case ARM::VST1LNdAsm_U32: { 5010 MCInst TmpInst; 5011 // Shuffle the operands around so the lane index operand is in the 5012 // right place. 5013 TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode())); 5014 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5015 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5016 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5017 TmpInst.addOperand(Inst.getOperand(1)); // lane 5018 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5019 TmpInst.addOperand(Inst.getOperand(5)); 5020 Inst = TmpInst; 5021 return true; 5022 } 5023 // Handle NEON VLD1 complex aliases. 
5024 case ARM::VLD1LNdWB_register_Asm_8: 5025 case ARM::VLD1LNdWB_register_Asm_P8: 5026 case ARM::VLD1LNdWB_register_Asm_I8: 5027 case ARM::VLD1LNdWB_register_Asm_S8: 5028 case ARM::VLD1LNdWB_register_Asm_U8: 5029 case ARM::VLD1LNdWB_register_Asm_16: 5030 case ARM::VLD1LNdWB_register_Asm_P16: 5031 case ARM::VLD1LNdWB_register_Asm_I16: 5032 case ARM::VLD1LNdWB_register_Asm_S16: 5033 case ARM::VLD1LNdWB_register_Asm_U16: 5034 case ARM::VLD1LNdWB_register_Asm_32: 5035 case ARM::VLD1LNdWB_register_Asm_F: 5036 case ARM::VLD1LNdWB_register_Asm_F32: 5037 case ARM::VLD1LNdWB_register_Asm_I32: 5038 case ARM::VLD1LNdWB_register_Asm_S32: 5039 case ARM::VLD1LNdWB_register_Asm_U32: { 5040 MCInst TmpInst; 5041 // Shuffle the operands around so the lane index operand is in the 5042 // right place. 5043 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5044 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5045 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5046 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5047 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5048 TmpInst.addOperand(Inst.getOperand(4)); // Rm 5049 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5050 TmpInst.addOperand(Inst.getOperand(1)); // lane 5051 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 5052 TmpInst.addOperand(Inst.getOperand(6)); 5053 Inst = TmpInst; 5054 return true; 5055 } 5056 case ARM::VLD1LNdWB_fixed_Asm_8: 5057 case ARM::VLD1LNdWB_fixed_Asm_P8: 5058 case ARM::VLD1LNdWB_fixed_Asm_I8: 5059 case ARM::VLD1LNdWB_fixed_Asm_S8: 5060 case ARM::VLD1LNdWB_fixed_Asm_U8: 5061 case ARM::VLD1LNdWB_fixed_Asm_16: 5062 case ARM::VLD1LNdWB_fixed_Asm_P16: 5063 case ARM::VLD1LNdWB_fixed_Asm_I16: 5064 case ARM::VLD1LNdWB_fixed_Asm_S16: 5065 case ARM::VLD1LNdWB_fixed_Asm_U16: 5066 case ARM::VLD1LNdWB_fixed_Asm_32: 5067 case ARM::VLD1LNdWB_fixed_Asm_F: 5068 case ARM::VLD1LNdWB_fixed_Asm_F32: 5069 case ARM::VLD1LNdWB_fixed_Asm_I32: 5070 case ARM::VLD1LNdWB_fixed_Asm_S32: 5071 case 
ARM::VLD1LNdWB_fixed_Asm_U32: { 5072 MCInst TmpInst; 5073 // Shuffle the operands around so the lane index operand is in the 5074 // right place. 5075 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5076 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5077 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 5078 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5079 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5080 TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm 5081 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5082 TmpInst.addOperand(Inst.getOperand(1)); // lane 5083 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5084 TmpInst.addOperand(Inst.getOperand(5)); 5085 Inst = TmpInst; 5086 return true; 5087 } 5088 case ARM::VLD1LNdAsm_8: 5089 case ARM::VLD1LNdAsm_P8: 5090 case ARM::VLD1LNdAsm_I8: 5091 case ARM::VLD1LNdAsm_S8: 5092 case ARM::VLD1LNdAsm_U8: 5093 case ARM::VLD1LNdAsm_16: 5094 case ARM::VLD1LNdAsm_P16: 5095 case ARM::VLD1LNdAsm_I16: 5096 case ARM::VLD1LNdAsm_S16: 5097 case ARM::VLD1LNdAsm_U16: 5098 case ARM::VLD1LNdAsm_32: 5099 case ARM::VLD1LNdAsm_F: 5100 case ARM::VLD1LNdAsm_F32: 5101 case ARM::VLD1LNdAsm_I32: 5102 case ARM::VLD1LNdAsm_S32: 5103 case ARM::VLD1LNdAsm_U32: { 5104 MCInst TmpInst; 5105 // Shuffle the operands around so the lane index operand is in the 5106 // right place. 5107 TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode())); 5108 TmpInst.addOperand(Inst.getOperand(0)); // Vd 5109 TmpInst.addOperand(Inst.getOperand(2)); // Rn 5110 TmpInst.addOperand(Inst.getOperand(3)); // alignment 5111 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 5112 TmpInst.addOperand(Inst.getOperand(1)); // lane 5113 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 5114 TmpInst.addOperand(Inst.getOperand(5)); 5115 Inst = TmpInst; 5116 return true; 5117 } 5118 // Handle the MOV complex aliases. 
  // Tail of the processInstruction() opcode switch: pseudo-instruction and
  // encoding-selection rewrites applied after a successful match.
  // Shift-by-register pseudos (asr/lsr/lsl/ror rd, rn, rm) are really
  // MOVsr with an embedded shift-operand immediate.
  case ARM::ASRr:
  case ARM::LSRr:
  case ARM::LSLr:
  case ARM::RORr: {
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
    }
    // Register-shifted form: the SOReg operand encodes only the shift type
    // (amount field is 0); the actual amount comes from the Rm operand.
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsr);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // Shift-by-immediate pseudos map onto MOVsi (or plain MOVr for amount 0).
  case ARM::ASRi:
  case ARM::LSRi:
  case ARM::LSLi:
  case ARM::RORi: {
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
    }
    // A shift by zero is a plain MOVr, not a MOVsi.
    unsigned Amt = Inst.getOperand(2).getImm();
    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
    MCInst TmpInst;
    TmpInst.setOpcode(Opc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    if (Opc == ARM::MOVsi)
      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // RRX is MOVsi with an rrx shift operand (amount implicitly 1, encoded 0).
  case ARM::RRXi: {
    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsi);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
    Inst = TmpInst;
    return true;
  }
  case ARM::t2LDMIA_UPD: {
    // If this is a load of a single register, then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDR_POST);
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::t2STMDB_UPD: {
    // If this is a store of a single register, then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STR_PRE);
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(-4));
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4));
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::t2ADDri12:
    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
    // mnemonic was used (not "addw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2ADDri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::t2SUBri12:
    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
    // mnemonic was used (not "subw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2SUBri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tADDi3);
      return true;
    }
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tSUBi3);
      return true;
    }
    break;
  case ARM::t2ADDrr: {
    // If the destination and first source operand are the same, and
    // there's no setting of the flags, use encoding T2 instead of T3.
    // Note that this is only for ADD, not SUB. This mirrors the system
    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
        Inst.getOperand(5).getReg() != 0 ||
        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
      break;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::tADDhirr);
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(0));
    TmpInst.addOperand(Inst.getOperand(2));
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
      Inst.setOpcode(ARM::tBcc);
      return true;
    }
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
      Inst.setOpcode(ARM::t2Bcc);
      return true;
    }
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
      Inst.setOpcode(ARM::t2B);
      return true;
    }
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
      Inst.setOpcode(ARM::tB);
      return true;
    }
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
      return true;
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
      return true;
    }
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2LDMIA_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2STMDB_UPD);
    // Add the base register and writeback operands.
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
      return true;
    }
    break;
  }
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero. The encoding also expects the low
    // bit of the condition to be encoded as bit 4 of the mask operand,
    // so mask that in if needed
    MCOperand &MO = Inst.getOperand(1);
    unsigned Mask = MO.getImm();
    unsigned OrigMask = Mask;
    unsigned TZ = CountTrailingZeros_32(Mask);
    if ((Inst.getOperand(0).getImm() & 1) == 0) {
      assert(Mask && TZ <= 3 && "illegal IT mask value!");
      // Flip the t/e bits above the terminating '1' bit of the mask.
      for (unsigned i = 3; i != TZ; --i)
        Mask ^= 1 << i;
    } else
      Mask |= 0x10;
    MO.setImm(Mask);

    // Set up the IT block state according to the IT instruction we just
    // matched.
    assert(!inITBlock() && "nested IT blocks?!");
    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
    ITState.CurPosition = 0;
    ITState.FirstCond = true;
    break;
  }
  }
  return false;
}

unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
  // suffix depending on whether they're in an IT block or not.
5512 unsigned Opc = Inst.getOpcode(); 5513 const MCInstrDesc &MCID = getInstDesc(Opc); 5514 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 5515 assert(MCID.hasOptionalDef() && 5516 "optionally flag setting instruction missing optional def operand"); 5517 assert(MCID.NumOperands == Inst.getNumOperands() && 5518 "operand count mismatch!"); 5519 // Find the optional-def operand (cc_out). 5520 unsigned OpNo; 5521 for (OpNo = 0; 5522 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 5523 ++OpNo) 5524 ; 5525 // If we're parsing Thumb1, reject it completely. 5526 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 5527 return Match_MnemonicFail; 5528 // If we're parsing Thumb2, which form is legal depends on whether we're 5529 // in an IT block. 5530 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 5531 !inITBlock()) 5532 return Match_RequiresITBlock; 5533 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 5534 inITBlock()) 5535 return Match_RequiresNotITBlock; 5536 } 5537 // Some high-register supporting Thumb1 encodings only allow both registers 5538 // to be from r0-r7 when in Thumb2. 5539 else if (Opc == ARM::tADDhirr && isThumbOne() && 5540 isARMLowRegister(Inst.getOperand(1).getReg()) && 5541 isARMLowRegister(Inst.getOperand(2).getReg())) 5542 return Match_RequiresThumb2; 5543 // Others only require ARMv6 or later. 
5544 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5545 isARMLowRegister(Inst.getOperand(0).getReg()) && 5546 isARMLowRegister(Inst.getOperand(1).getReg())) 5547 return Match_RequiresV6; 5548 return Match_Success; 5549 } 5550 5551 bool ARMAsmParser:: 5552 MatchAndEmitInstruction(SMLoc IDLoc, 5553 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5554 MCStreamer &Out) { 5555 MCInst Inst; 5556 unsigned ErrorInfo; 5557 unsigned MatchResult; 5558 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5559 switch (MatchResult) { 5560 default: break; 5561 case Match_Success: 5562 // Context sensitive operand constraints aren't handled by the matcher, 5563 // so check them here. 5564 if (validateInstruction(Inst, Operands)) { 5565 // Still progress the IT block, otherwise one wrong condition causes 5566 // nasty cascading errors. 5567 forwardITPosition(); 5568 return true; 5569 } 5570 5571 // Some instructions need post-processing to, for example, tweak which 5572 // encoding is selected. Loop on it while changes happen so the 5573 // individual transformations can chain off each other. E.g., 5574 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5575 while (processInstruction(Inst, Operands)) 5576 ; 5577 5578 // Only move forward at the very end so that everything in validate 5579 // and process gets a consistent answer about whether we're in an IT 5580 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher told
    // us which one it was; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    return parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return
      parseDirectiveSyntax(DirectiveID.getLoc());
  // Returning true means "directive not handled here"; the generic parser
  // will then report it as unknown.
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  // Consume the EndOfStatement.
  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // Switch to Thumb mode if we aren't already in it.
  if (!isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  return false;
}

/// parseDirectiveARM
///  ::= .arm
bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // Switch to ARM mode if we aren't already in it.
  if (isThumb())
    SwitchMode();
  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has function name after .thumb_func directive
  // ELF doesn't
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
return Error(L, "unexpected token in .thumb_func directive"); 5701 Name = Tok.getIdentifier(); 5702 Parser.Lex(); // Consume the identifier token. 5703 } 5704 5705 if (getLexer().isNot(AsmToken::EndOfStatement)) 5706 return Error(L, "unexpected token in directive"); 5707 Parser.Lex(); 5708 5709 // FIXME: assuming function name will be the line following .thumb_func 5710 if (!isMachO) { 5711 Name = Parser.getTok().getIdentifier(); 5712 } 5713 5714 // Mark symbol as a thumb symbol. 5715 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5716 getParser().getStreamer().EmitThumbFunc(Func); 5717 return false; 5718 } 5719 5720 /// parseDirectiveSyntax 5721 /// ::= .syntax unified | divided 5722 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5723 const AsmToken &Tok = Parser.getTok(); 5724 if (Tok.isNot(AsmToken::Identifier)) 5725 return Error(L, "unexpected token in .syntax directive"); 5726 StringRef Mode = Tok.getString(); 5727 if (Mode == "unified" || Mode == "UNIFIED") 5728 Parser.Lex(); 5729 else if (Mode == "divided" || Mode == "DIVIDED") 5730 return Error(L, "'.syntax divided' arm asssembly not supported"); 5731 else 5732 return Error(L, "unrecognized syntax mode in .syntax directive"); 5733 5734 if (getLexer().isNot(AsmToken::EndOfStatement)) 5735 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5736 Parser.Lex(); 5737 5738 // TODO tell the MC streamer the mode 5739 // getParser().getStreamer().Emit???(); 5740 return false; 5741 } 5742 5743 /// parseDirectiveCode 5744 /// ::= .code 16 | 32 5745 bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5746 const AsmToken &Tok = Parser.getTok(); 5747 if (Tok.isNot(AsmToken::Integer)) 5748 return Error(L, "unexpected token in .code directive"); 5749 int64_t Val = Parser.getTok().getIntVal(); 5750 if (Val == 16) 5751 Parser.Lex(); 5752 else if (Val == 32) 5753 Parser.Lex(); 5754 else 5755 return Error(L, "invalid operand to .code directive"); 5756 5757 if 
     (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  // Switch the parser's mode to match the requested instruction set and
  // tell the streamer so the object writer can mark the region.
  if (Val == 16) {
    if (!isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  } else {
    if (isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  }

  return false;
}

extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  // Register this parser for both the ARM and Thumb targets.
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}

// Pull in the tablegen-generated register matcher and instruction
// matcher implementations.
#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"