1 //===- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions -------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "ARMFeatures.h" 10 #include "Utils/ARMBaseInfo.h" 11 #include "MCTargetDesc/ARMAddressingModes.h" 12 #include "MCTargetDesc/ARMBaseInfo.h" 13 #include "MCTargetDesc/ARMInstPrinter.h" 14 #include "MCTargetDesc/ARMMCExpr.h" 15 #include "MCTargetDesc/ARMMCTargetDesc.h" 16 #include "TargetInfo/ARMTargetInfo.h" 17 #include "llvm/ADT/APFloat.h" 18 #include "llvm/ADT/APInt.h" 19 #include "llvm/ADT/None.h" 20 #include "llvm/ADT/STLExtras.h" 21 #include "llvm/ADT/SmallSet.h" 22 #include "llvm/ADT/SmallVector.h" 23 #include "llvm/ADT/StringMap.h" 24 #include "llvm/ADT/StringRef.h" 25 #include "llvm/ADT/StringSwitch.h" 26 #include "llvm/ADT/Triple.h" 27 #include "llvm/ADT/Twine.h" 28 #include "llvm/MC/MCContext.h" 29 #include "llvm/MC/MCExpr.h" 30 #include "llvm/MC/MCInst.h" 31 #include "llvm/MC/MCInstrDesc.h" 32 #include "llvm/MC/MCInstrInfo.h" 33 #include "llvm/MC/MCObjectFileInfo.h" 34 #include "llvm/MC/MCParser/MCAsmLexer.h" 35 #include "llvm/MC/MCParser/MCAsmParser.h" 36 #include "llvm/MC/MCParser/MCAsmParserExtension.h" 37 #include "llvm/MC/MCParser/MCAsmParserUtils.h" 38 #include "llvm/MC/MCParser/MCParsedAsmOperand.h" 39 #include "llvm/MC/MCParser/MCTargetAsmParser.h" 40 #include "llvm/MC/MCRegisterInfo.h" 41 #include "llvm/MC/MCSection.h" 42 #include "llvm/MC/MCStreamer.h" 43 #include "llvm/MC/MCSubtargetInfo.h" 44 #include "llvm/MC/MCSymbol.h" 45 #include "llvm/MC/SubtargetFeature.h" 46 #include "llvm/Support/ARMBuildAttributes.h" 47 #include "llvm/Support/ARMEHABI.h" 48 #include "llvm/Support/Casting.h" 49 #include "llvm/Support/CommandLine.h" 50 #include 
"llvm/Support/Compiler.h" 51 #include "llvm/Support/ErrorHandling.h" 52 #include "llvm/Support/MathExtras.h" 53 #include "llvm/Support/SMLoc.h" 54 #include "llvm/Support/TargetParser.h" 55 #include "llvm/Support/TargetRegistry.h" 56 #include "llvm/Support/raw_ostream.h" 57 #include <algorithm> 58 #include <cassert> 59 #include <cstddef> 60 #include <cstdint> 61 #include <iterator> 62 #include <limits> 63 #include <memory> 64 #include <string> 65 #include <utility> 66 #include <vector> 67 68 #define DEBUG_TYPE "asm-parser" 69 70 using namespace llvm; 71 72 namespace llvm { 73 extern const MCInstrDesc ARMInsts[]; 74 } // end namespace llvm 75 76 namespace { 77 78 enum class ImplicitItModeTy { Always, Never, ARMOnly, ThumbOnly }; 79 80 static cl::opt<ImplicitItModeTy> ImplicitItMode( 81 "arm-implicit-it", cl::init(ImplicitItModeTy::ARMOnly), 82 cl::desc("Allow conditional instructions outdside of an IT block"), 83 cl::values(clEnumValN(ImplicitItModeTy::Always, "always", 84 "Accept in both ISAs, emit implicit ITs in Thumb"), 85 clEnumValN(ImplicitItModeTy::Never, "never", 86 "Warn in ARM, reject in Thumb"), 87 clEnumValN(ImplicitItModeTy::ARMOnly, "arm", 88 "Accept in ARM, reject in Thumb"), 89 clEnumValN(ImplicitItModeTy::ThumbOnly, "thumb", 90 "Warn in ARM, emit implicit ITs in Thumb"))); 91 92 static cl::opt<bool> AddBuildAttributes("arm-add-build-attributes", 93 cl::init(false)); 94 95 enum VectorLaneTy { NoLanes, AllLanes, IndexedLane }; 96 97 static inline unsigned extractITMaskBit(unsigned Mask, unsigned Position) { 98 // Position==0 means we're not in an IT block at all. Position==1 99 // means we want the first state bit, which is always 0 (Then). 100 // Position==2 means we want the second state bit, stored at bit 3 101 // of Mask, and so on downwards. So (5 - Position) will shift the 102 // right bit down to bit 0, including the always-0 bit at bit 4 for 103 // the mandatory initial Then. 
104 return (Mask >> (5 - Position) & 1); 105 } 106 107 class UnwindContext { 108 using Locs = SmallVector<SMLoc, 4>; 109 110 MCAsmParser &Parser; 111 Locs FnStartLocs; 112 Locs CantUnwindLocs; 113 Locs PersonalityLocs; 114 Locs PersonalityIndexLocs; 115 Locs HandlerDataLocs; 116 int FPReg; 117 118 public: 119 UnwindContext(MCAsmParser &P) : Parser(P), FPReg(ARM::SP) {} 120 121 bool hasFnStart() const { return !FnStartLocs.empty(); } 122 bool cantUnwind() const { return !CantUnwindLocs.empty(); } 123 bool hasHandlerData() const { return !HandlerDataLocs.empty(); } 124 125 bool hasPersonality() const { 126 return !(PersonalityLocs.empty() && PersonalityIndexLocs.empty()); 127 } 128 129 void recordFnStart(SMLoc L) { FnStartLocs.push_back(L); } 130 void recordCantUnwind(SMLoc L) { CantUnwindLocs.push_back(L); } 131 void recordPersonality(SMLoc L) { PersonalityLocs.push_back(L); } 132 void recordHandlerData(SMLoc L) { HandlerDataLocs.push_back(L); } 133 void recordPersonalityIndex(SMLoc L) { PersonalityIndexLocs.push_back(L); } 134 135 void saveFPReg(int Reg) { FPReg = Reg; } 136 int getFPReg() const { return FPReg; } 137 138 void emitFnStartLocNotes() const { 139 for (Locs::const_iterator FI = FnStartLocs.begin(), FE = FnStartLocs.end(); 140 FI != FE; ++FI) 141 Parser.Note(*FI, ".fnstart was specified here"); 142 } 143 144 void emitCantUnwindLocNotes() const { 145 for (Locs::const_iterator UI = CantUnwindLocs.begin(), 146 UE = CantUnwindLocs.end(); UI != UE; ++UI) 147 Parser.Note(*UI, ".cantunwind was specified here"); 148 } 149 150 void emitHandlerDataLocNotes() const { 151 for (Locs::const_iterator HI = HandlerDataLocs.begin(), 152 HE = HandlerDataLocs.end(); HI != HE; ++HI) 153 Parser.Note(*HI, ".handlerdata was specified here"); 154 } 155 156 void emitPersonalityLocNotes() const { 157 for (Locs::const_iterator PI = PersonalityLocs.begin(), 158 PE = PersonalityLocs.end(), 159 PII = PersonalityIndexLocs.begin(), 160 PIE = PersonalityIndexLocs.end(); 161 PI != PE || 
PII != PIE;) { 162 if (PI != PE && (PII == PIE || PI->getPointer() < PII->getPointer())) 163 Parser.Note(*PI++, ".personality was specified here"); 164 else if (PII != PIE && (PI == PE || PII->getPointer() < PI->getPointer())) 165 Parser.Note(*PII++, ".personalityindex was specified here"); 166 else 167 llvm_unreachable(".personality and .personalityindex cannot be " 168 "at the same location"); 169 } 170 } 171 172 void reset() { 173 FnStartLocs = Locs(); 174 CantUnwindLocs = Locs(); 175 PersonalityLocs = Locs(); 176 HandlerDataLocs = Locs(); 177 PersonalityIndexLocs = Locs(); 178 FPReg = ARM::SP; 179 } 180 }; 181 182 183 class ARMAsmParser : public MCTargetAsmParser { 184 const MCRegisterInfo *MRI; 185 UnwindContext UC; 186 187 ARMTargetStreamer &getTargetStreamer() { 188 assert(getParser().getStreamer().getTargetStreamer() && 189 "do not have a target streamer"); 190 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer(); 191 return static_cast<ARMTargetStreamer &>(TS); 192 } 193 194 // Map of register aliases registers via the .req directive. 195 StringMap<unsigned> RegisterReqs; 196 197 bool NextSymbolIsThumb; 198 199 bool useImplicitITThumb() const { 200 return ImplicitItMode == ImplicitItModeTy::Always || 201 ImplicitItMode == ImplicitItModeTy::ThumbOnly; 202 } 203 204 bool useImplicitITARM() const { 205 return ImplicitItMode == ImplicitItModeTy::Always || 206 ImplicitItMode == ImplicitItModeTy::ARMOnly; 207 } 208 209 struct { 210 ARMCC::CondCodes Cond; // Condition for IT block. 211 unsigned Mask:4; // Condition mask for instructions. 212 // Starting at first 1 (from lsb). 213 // '1' condition as indicated in IT. 214 // '0' inverse of condition (else). 215 // Count of instructions in IT block is 216 // 4 - trailingzeroes(mask) 217 // Note that this does not have the same encoding 218 // as in the IT instruction, which also depends 219 // on the low bit of the condition code. 
220 221 unsigned CurPosition; // Current position in parsing of IT 222 // block. In range [0,4], with 0 being the IT 223 // instruction itself. Initialized according to 224 // count of instructions in block. ~0U if no 225 // active IT block. 226 227 bool IsExplicit; // true - The IT instruction was present in the 228 // input, we should not modify it. 229 // false - The IT instruction was added 230 // implicitly, we can extend it if that 231 // would be legal. 232 } ITState; 233 234 SmallVector<MCInst, 4> PendingConditionalInsts; 235 236 void flushPendingInstructions(MCStreamer &Out) override { 237 if (!inImplicitITBlock()) { 238 assert(PendingConditionalInsts.size() == 0); 239 return; 240 } 241 242 // Emit the IT instruction 243 MCInst ITInst; 244 ITInst.setOpcode(ARM::t2IT); 245 ITInst.addOperand(MCOperand::createImm(ITState.Cond)); 246 ITInst.addOperand(MCOperand::createImm(ITState.Mask)); 247 Out.EmitInstruction(ITInst, getSTI()); 248 249 // Emit the conditonal instructions 250 assert(PendingConditionalInsts.size() <= 4); 251 for (const MCInst &Inst : PendingConditionalInsts) { 252 Out.EmitInstruction(Inst, getSTI()); 253 } 254 PendingConditionalInsts.clear(); 255 256 // Clear the IT state 257 ITState.Mask = 0; 258 ITState.CurPosition = ~0U; 259 } 260 261 bool inITBlock() { return ITState.CurPosition != ~0U; } 262 bool inExplicitITBlock() { return inITBlock() && ITState.IsExplicit; } 263 bool inImplicitITBlock() { return inITBlock() && !ITState.IsExplicit; } 264 265 bool lastInITBlock() { 266 return ITState.CurPosition == 4 - countTrailingZeros(ITState.Mask); 267 } 268 269 void forwardITPosition() { 270 if (!inITBlock()) return; 271 // Move to the next instruction in the IT block, if there is one. If not, 272 // mark the block as done, except for implicit IT blocks, which we leave 273 // open until we find an instruction that can't be added to it. 
274 unsigned TZ = countTrailingZeros(ITState.Mask); 275 if (++ITState.CurPosition == 5 - TZ && ITState.IsExplicit) 276 ITState.CurPosition = ~0U; // Done with the IT block after this. 277 } 278 279 // Rewind the state of the current IT block, removing the last slot from it. 280 void rewindImplicitITPosition() { 281 assert(inImplicitITBlock()); 282 assert(ITState.CurPosition > 1); 283 ITState.CurPosition--; 284 unsigned TZ = countTrailingZeros(ITState.Mask); 285 unsigned NewMask = 0; 286 NewMask |= ITState.Mask & (0xC << TZ); 287 NewMask |= 0x2 << TZ; 288 ITState.Mask = NewMask; 289 } 290 291 // Rewind the state of the current IT block, removing the last slot from it. 292 // If we were at the first slot, this closes the IT block. 293 void discardImplicitITBlock() { 294 assert(inImplicitITBlock()); 295 assert(ITState.CurPosition == 1); 296 ITState.CurPosition = ~0U; 297 } 298 299 // Return the low-subreg of a given Q register. 300 unsigned getDRegFromQReg(unsigned QReg) const { 301 return MRI->getSubReg(QReg, ARM::dsub_0); 302 } 303 304 // Get the condition code corresponding to the current IT block slot. 305 ARMCC::CondCodes currentITCond() { 306 unsigned MaskBit = extractITMaskBit(ITState.Mask, ITState.CurPosition); 307 return MaskBit ? ARMCC::getOppositeCondition(ITState.Cond) : ITState.Cond; 308 } 309 310 // Invert the condition of the current IT block slot without changing any 311 // other slots in the same block. 312 void invertCurrentITCondition() { 313 if (ITState.CurPosition == 1) { 314 ITState.Cond = ARMCC::getOppositeCondition(ITState.Cond); 315 } else { 316 ITState.Mask ^= 1 << (5 - ITState.CurPosition); 317 } 318 } 319 320 // Returns true if the current IT block is full (all 4 slots used). 321 bool isITBlockFull() { 322 return inITBlock() && (ITState.Mask & 1); 323 } 324 325 // Extend the current implicit IT block to have one more slot with the given 326 // condition code. 
327 void extendImplicitITBlock(ARMCC::CondCodes Cond) { 328 assert(inImplicitITBlock()); 329 assert(!isITBlockFull()); 330 assert(Cond == ITState.Cond || 331 Cond == ARMCC::getOppositeCondition(ITState.Cond)); 332 unsigned TZ = countTrailingZeros(ITState.Mask); 333 unsigned NewMask = 0; 334 // Keep any existing condition bits. 335 NewMask |= ITState.Mask & (0xE << TZ); 336 // Insert the new condition bit. 337 NewMask |= (Cond != ITState.Cond) << TZ; 338 // Move the trailing 1 down one bit. 339 NewMask |= 1 << (TZ - 1); 340 ITState.Mask = NewMask; 341 } 342 343 // Create a new implicit IT block with a dummy condition code. 344 void startImplicitITBlock() { 345 assert(!inITBlock()); 346 ITState.Cond = ARMCC::AL; 347 ITState.Mask = 8; 348 ITState.CurPosition = 1; 349 ITState.IsExplicit = false; 350 } 351 352 // Create a new explicit IT block with the given condition and mask. 353 // The mask should be in the format used in ARMOperand and 354 // MCOperand, with a 1 implying 'e', regardless of the low bit of 355 // the condition. 
356 void startExplicitITBlock(ARMCC::CondCodes Cond, unsigned Mask) { 357 assert(!inITBlock()); 358 ITState.Cond = Cond; 359 ITState.Mask = Mask; 360 ITState.CurPosition = 0; 361 ITState.IsExplicit = true; 362 } 363 364 struct { 365 unsigned Mask : 4; 366 unsigned CurPosition; 367 } VPTState; 368 bool inVPTBlock() { return VPTState.CurPosition != ~0U; } 369 void forwardVPTPosition() { 370 if (!inVPTBlock()) return; 371 unsigned TZ = countTrailingZeros(VPTState.Mask); 372 if (++VPTState.CurPosition == 5 - TZ) 373 VPTState.CurPosition = ~0U; 374 } 375 376 void Note(SMLoc L, const Twine &Msg, SMRange Range = None) { 377 return getParser().Note(L, Msg, Range); 378 } 379 380 bool Warning(SMLoc L, const Twine &Msg, SMRange Range = None) { 381 return getParser().Warning(L, Msg, Range); 382 } 383 384 bool Error(SMLoc L, const Twine &Msg, SMRange Range = None) { 385 return getParser().Error(L, Msg, Range); 386 } 387 388 bool validatetLDMRegList(const MCInst &Inst, const OperandVector &Operands, 389 unsigned ListNo, bool IsARPop = false); 390 bool validatetSTMRegList(const MCInst &Inst, const OperandVector &Operands, 391 unsigned ListNo); 392 393 int tryParseRegister(); 394 bool tryParseRegisterWithWriteBack(OperandVector &); 395 int tryParseShiftRegister(OperandVector &); 396 bool parseRegisterList(OperandVector &, bool EnforceOrder = true); 397 bool parseMemory(OperandVector &); 398 bool parseOperand(OperandVector &, StringRef Mnemonic); 399 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 400 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 401 unsigned &ShiftAmount); 402 bool parseLiteralValues(unsigned Size, SMLoc L); 403 bool parseDirectiveThumb(SMLoc L); 404 bool parseDirectiveARM(SMLoc L); 405 bool parseDirectiveThumbFunc(SMLoc L); 406 bool parseDirectiveCode(SMLoc L); 407 bool parseDirectiveSyntax(SMLoc L); 408 bool parseDirectiveReq(StringRef Name, SMLoc L); 409 bool parseDirectiveUnreq(SMLoc L); 410 bool parseDirectiveArch(SMLoc L); 411 bool 
parseDirectiveEabiAttr(SMLoc L); 412 bool parseDirectiveCPU(SMLoc L); 413 bool parseDirectiveFPU(SMLoc L); 414 bool parseDirectiveFnStart(SMLoc L); 415 bool parseDirectiveFnEnd(SMLoc L); 416 bool parseDirectiveCantUnwind(SMLoc L); 417 bool parseDirectivePersonality(SMLoc L); 418 bool parseDirectiveHandlerData(SMLoc L); 419 bool parseDirectiveSetFP(SMLoc L); 420 bool parseDirectivePad(SMLoc L); 421 bool parseDirectiveRegSave(SMLoc L, bool IsVector); 422 bool parseDirectiveInst(SMLoc L, char Suffix = '\0'); 423 bool parseDirectiveLtorg(SMLoc L); 424 bool parseDirectiveEven(SMLoc L); 425 bool parseDirectivePersonalityIndex(SMLoc L); 426 bool parseDirectiveUnwindRaw(SMLoc L); 427 bool parseDirectiveTLSDescSeq(SMLoc L); 428 bool parseDirectiveMovSP(SMLoc L); 429 bool parseDirectiveObjectArch(SMLoc L); 430 bool parseDirectiveArchExtension(SMLoc L); 431 bool parseDirectiveAlign(SMLoc L); 432 bool parseDirectiveThumbSet(SMLoc L); 433 434 bool isMnemonicVPTPredicable(StringRef Mnemonic, StringRef ExtraToken); 435 StringRef splitMnemonic(StringRef Mnemonic, StringRef ExtraToken, 436 unsigned &PredicationCode, 437 unsigned &VPTPredicationCode, bool &CarrySetting, 438 unsigned &ProcessorIMod, StringRef &ITMask); 439 void getMnemonicAcceptInfo(StringRef Mnemonic, StringRef ExtraToken, 440 StringRef FullInst, bool &CanAcceptCarrySet, 441 bool &CanAcceptPredicationCode, 442 bool &CanAcceptVPTPredicationCode); 443 444 void tryConvertingToTwoOperandForm(StringRef Mnemonic, bool CarrySetting, 445 OperandVector &Operands); 446 bool isThumb() const { 447 // FIXME: Can tablegen auto-generate this? 
448 return getSTI().getFeatureBits()[ARM::ModeThumb]; 449 } 450 451 bool isThumbOne() const { 452 return isThumb() && !getSTI().getFeatureBits()[ARM::FeatureThumb2]; 453 } 454 455 bool isThumbTwo() const { 456 return isThumb() && getSTI().getFeatureBits()[ARM::FeatureThumb2]; 457 } 458 459 bool hasThumb() const { 460 return getSTI().getFeatureBits()[ARM::HasV4TOps]; 461 } 462 463 bool hasThumb2() const { 464 return getSTI().getFeatureBits()[ARM::FeatureThumb2]; 465 } 466 467 bool hasV6Ops() const { 468 return getSTI().getFeatureBits()[ARM::HasV6Ops]; 469 } 470 471 bool hasV6T2Ops() const { 472 return getSTI().getFeatureBits()[ARM::HasV6T2Ops]; 473 } 474 475 bool hasV6MOps() const { 476 return getSTI().getFeatureBits()[ARM::HasV6MOps]; 477 } 478 479 bool hasV7Ops() const { 480 return getSTI().getFeatureBits()[ARM::HasV7Ops]; 481 } 482 483 bool hasV8Ops() const { 484 return getSTI().getFeatureBits()[ARM::HasV8Ops]; 485 } 486 487 bool hasV8MBaseline() const { 488 return getSTI().getFeatureBits()[ARM::HasV8MBaselineOps]; 489 } 490 491 bool hasV8MMainline() const { 492 return getSTI().getFeatureBits()[ARM::HasV8MMainlineOps]; 493 } 494 bool hasV8_1MMainline() const { 495 return getSTI().getFeatureBits()[ARM::HasV8_1MMainlineOps]; 496 } 497 bool hasMVE() const { 498 return getSTI().getFeatureBits()[ARM::HasMVEIntegerOps]; 499 } 500 bool hasMVEFloat() const { 501 return getSTI().getFeatureBits()[ARM::HasMVEFloatOps]; 502 } 503 bool has8MSecExt() const { 504 return getSTI().getFeatureBits()[ARM::Feature8MSecExt]; 505 } 506 507 bool hasARM() const { 508 return !getSTI().getFeatureBits()[ARM::FeatureNoARM]; 509 } 510 511 bool hasDSP() const { 512 return getSTI().getFeatureBits()[ARM::FeatureDSP]; 513 } 514 515 bool hasD32() const { 516 return getSTI().getFeatureBits()[ARM::FeatureD32]; 517 } 518 519 bool hasV8_1aOps() const { 520 return getSTI().getFeatureBits()[ARM::HasV8_1aOps]; 521 } 522 523 bool hasRAS() const { 524 return getSTI().getFeatureBits()[ARM::FeatureRAS]; 525 
} 526 527 void SwitchMode() { 528 MCSubtargetInfo &STI = copySTI(); 529 auto FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 530 setAvailableFeatures(FB); 531 } 532 533 void FixModeAfterArchChange(bool WasThumb, SMLoc Loc); 534 535 bool isMClass() const { 536 return getSTI().getFeatureBits()[ARM::FeatureMClass]; 537 } 538 539 /// @name Auto-generated Match Functions 540 /// { 541 542 #define GET_ASSEMBLER_HEADER 543 #include "ARMGenAsmMatcher.inc" 544 545 /// } 546 547 OperandMatchResultTy parseITCondCode(OperandVector &); 548 OperandMatchResultTy parseCoprocNumOperand(OperandVector &); 549 OperandMatchResultTy parseCoprocRegOperand(OperandVector &); 550 OperandMatchResultTy parseCoprocOptionOperand(OperandVector &); 551 OperandMatchResultTy parseMemBarrierOptOperand(OperandVector &); 552 OperandMatchResultTy parseTraceSyncBarrierOptOperand(OperandVector &); 553 OperandMatchResultTy parseInstSyncBarrierOptOperand(OperandVector &); 554 OperandMatchResultTy parseProcIFlagsOperand(OperandVector &); 555 OperandMatchResultTy parseMSRMaskOperand(OperandVector &); 556 OperandMatchResultTy parseBankedRegOperand(OperandVector &); 557 OperandMatchResultTy parsePKHImm(OperandVector &O, StringRef Op, int Low, 558 int High); 559 OperandMatchResultTy parsePKHLSLImm(OperandVector &O) { 560 return parsePKHImm(O, "lsl", 0, 31); 561 } 562 OperandMatchResultTy parsePKHASRImm(OperandVector &O) { 563 return parsePKHImm(O, "asr", 1, 32); 564 } 565 OperandMatchResultTy parseSetEndImm(OperandVector &); 566 OperandMatchResultTy parseShifterImm(OperandVector &); 567 OperandMatchResultTy parseRotImm(OperandVector &); 568 OperandMatchResultTy parseModImm(OperandVector &); 569 OperandMatchResultTy parseBitfield(OperandVector &); 570 OperandMatchResultTy parsePostIdxReg(OperandVector &); 571 OperandMatchResultTy parseAM3Offset(OperandVector &); 572 OperandMatchResultTy parseFPImm(OperandVector &); 573 OperandMatchResultTy parseVectorList(OperandVector &); 574 
OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, 575 SMLoc &EndLoc); 576 577 // Asm Match Converter Methods 578 void cvtThumbMultiply(MCInst &Inst, const OperandVector &); 579 void cvtThumbBranches(MCInst &Inst, const OperandVector &); 580 581 bool validateInstruction(MCInst &Inst, const OperandVector &Ops); 582 bool processInstruction(MCInst &Inst, const OperandVector &Ops, MCStreamer &Out); 583 bool shouldOmitCCOutOperand(StringRef Mnemonic, OperandVector &Operands); 584 bool shouldOmitPredicateOperand(StringRef Mnemonic, OperandVector &Operands); 585 bool shouldOmitVectorPredicateOperand(StringRef Mnemonic, OperandVector &Operands); 586 bool isITBlockTerminator(MCInst &Inst) const; 587 void fixupGNULDRDAlias(StringRef Mnemonic, OperandVector &Operands); 588 bool validateLDRDSTRD(MCInst &Inst, const OperandVector &Operands, 589 bool Load, bool ARMMode, bool Writeback); 590 591 public: 592 enum ARMMatchResultTy { 593 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 594 Match_RequiresNotITBlock, 595 Match_RequiresV6, 596 Match_RequiresThumb2, 597 Match_RequiresV8, 598 Match_RequiresFlagSetting, 599 #define GET_OPERAND_DIAGNOSTIC_TYPES 600 #include "ARMGenAsmMatcher.inc" 601 602 }; 603 604 ARMAsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser, 605 const MCInstrInfo &MII, const MCTargetOptions &Options) 606 : MCTargetAsmParser(Options, STI, MII), UC(Parser) { 607 MCAsmParserExtension::Initialize(Parser); 608 609 // Cache the MCRegisterInfo. 610 MRI = getContext().getRegisterInfo(); 611 612 // Initialize the set of available features. 613 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 614 615 // Add build attributes based on the selected target. 616 if (AddBuildAttributes) 617 getTargetStreamer().emitTargetAttributes(STI); 618 619 // Not in an ITBlock to start with. 
620 ITState.CurPosition = ~0U; 621 622 VPTState.CurPosition = ~0U; 623 624 NextSymbolIsThumb = false; 625 } 626 627 // Implementation of the MCTargetAsmParser interface: 628 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override; 629 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name, 630 SMLoc NameLoc, OperandVector &Operands) override; 631 bool ParseDirective(AsmToken DirectiveID) override; 632 633 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op, 634 unsigned Kind) override; 635 unsigned checkTargetMatchPredicate(MCInst &Inst) override; 636 637 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode, 638 OperandVector &Operands, MCStreamer &Out, 639 uint64_t &ErrorInfo, 640 bool MatchingInlineAsm) override; 641 unsigned MatchInstruction(OperandVector &Operands, MCInst &Inst, 642 SmallVectorImpl<NearMissInfo> &NearMisses, 643 bool MatchingInlineAsm, bool &EmitInITBlock, 644 MCStreamer &Out); 645 646 struct NearMissMessage { 647 SMLoc Loc; 648 SmallString<128> Message; 649 }; 650 651 const char *getCustomOperandDiag(ARMMatchResultTy MatchError); 652 653 void FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn, 654 SmallVectorImpl<NearMissMessage> &NearMissesOut, 655 SMLoc IDLoc, OperandVector &Operands); 656 void ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses, SMLoc IDLoc, 657 OperandVector &Operands); 658 659 void doBeforeLabelEmit(MCSymbol *Symbol) override; 660 661 void onLabelParsed(MCSymbol *Symbol) override; 662 }; 663 664 /// ARMOperand - Instances of this class represent a parsed ARM machine 665 /// operand. 
666 class ARMOperand : public MCParsedAsmOperand { 667 enum KindTy { 668 k_CondCode, 669 k_VPTPred, 670 k_CCOut, 671 k_ITCondMask, 672 k_CoprocNum, 673 k_CoprocReg, 674 k_CoprocOption, 675 k_Immediate, 676 k_MemBarrierOpt, 677 k_InstSyncBarrierOpt, 678 k_TraceSyncBarrierOpt, 679 k_Memory, 680 k_PostIndexRegister, 681 k_MSRMask, 682 k_BankedReg, 683 k_ProcIFlags, 684 k_VectorIndex, 685 k_Register, 686 k_RegisterList, 687 k_RegisterListWithAPSR, 688 k_DPRRegisterList, 689 k_SPRRegisterList, 690 k_FPSRegisterListWithVPR, 691 k_FPDRegisterListWithVPR, 692 k_VectorList, 693 k_VectorListAllLanes, 694 k_VectorListIndexed, 695 k_ShiftedRegister, 696 k_ShiftedImmediate, 697 k_ShifterImmediate, 698 k_RotateImmediate, 699 k_ModifiedImmediate, 700 k_ConstantPoolImmediate, 701 k_BitfieldDescriptor, 702 k_Token, 703 } Kind; 704 705 SMLoc StartLoc, EndLoc, AlignmentLoc; 706 SmallVector<unsigned, 8> Registers; 707 708 struct CCOp { 709 ARMCC::CondCodes Val; 710 }; 711 712 struct VCCOp { 713 ARMVCC::VPTCodes Val; 714 }; 715 716 struct CopOp { 717 unsigned Val; 718 }; 719 720 struct CoprocOptionOp { 721 unsigned Val; 722 }; 723 724 struct ITMaskOp { 725 unsigned Mask:4; 726 }; 727 728 struct MBOptOp { 729 ARM_MB::MemBOpt Val; 730 }; 731 732 struct ISBOptOp { 733 ARM_ISB::InstSyncBOpt Val; 734 }; 735 736 struct TSBOptOp { 737 ARM_TSB::TraceSyncBOpt Val; 738 }; 739 740 struct IFlagsOp { 741 ARM_PROC::IFlags Val; 742 }; 743 744 struct MMaskOp { 745 unsigned Val; 746 }; 747 748 struct BankedRegOp { 749 unsigned Val; 750 }; 751 752 struct TokOp { 753 const char *Data; 754 unsigned Length; 755 }; 756 757 struct RegOp { 758 unsigned RegNum; 759 }; 760 761 // A vector register list is a sequential list of 1 to 4 registers. 
// A list of one or more consecutive NEON D registers, optionally
// double-spaced and optionally carrying a lane index.
struct VectorListOp {
  unsigned RegNum;         // First register in the list.
  unsigned Count;          // Number of registers in the list.
  unsigned LaneIndex;      // Lane, for the indexed forms.
  bool isDoubleSpaced;     // d0, d2, d4... rather than d0, d1, d2...
};

struct VectorIndexOp {
  unsigned Val;
};

struct ImmOp {
  const MCExpr *Val;
};

/// Combined record for all forms of ARM address expressions.
struct MemoryOp {
  unsigned BaseRegNum;
  // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
  // was specified.
  const MCConstantExpr *OffsetImm; // Offset immediate value
  unsigned OffsetRegNum;           // Offset register num, when OffsetImm == NULL
  ARM_AM::ShiftOpc ShiftType;      // Shift type for OffsetReg
  unsigned ShiftImm;               // shift for OffsetReg.
  unsigned Alignment;              // 0 = no alignment specified
                                   // n = alignment in bytes (2, 4, 8, 16, or 32)
  unsigned isNegative : 1;         // Negated OffsetReg? (~'U' bit)
};

// Post-indexed register offset, e.g. "[r0], r1, lsl #2".
struct PostIdxRegOp {
  unsigned RegNum;
  bool isAdd;
  ARM_AM::ShiftOpc ShiftTy;
  unsigned ShiftImm;
};

// "asr #n" / "lsl #n" shifter operand used by e.g. SSAT.
struct ShifterImmOp {
  bool isASR;
  unsigned Imm;
};

// Register shifted by a register, e.g. "r0, lsl r1".
struct RegShiftedRegOp {
  ARM_AM::ShiftOpc ShiftTy;
  unsigned SrcReg;
  unsigned ShiftReg;
  unsigned ShiftImm;
};

// Register shifted by an immediate, e.g. "r0, lsl #3".
struct RegShiftedImmOp {
  ARM_AM::ShiftOpc ShiftTy;
  unsigned SrcReg;
  unsigned ShiftImm;
};

struct RotImmOp {
  unsigned Imm;
};

// A32 "modified immediate": an 8-bit value rotated right by Rot.
struct ModImmOp {
  unsigned Bits;
  unsigned Rot;
};

// Bitfield descriptor for BFC/BFI-style instructions.
struct BitfieldOp {
  unsigned LSB;
  unsigned Width;
};

// Exactly one member is active at a time, selected by Kind.
union {
  struct CCOp CC;
  struct VCCOp VCC;
  struct CopOp Cop;
  struct CoprocOptionOp CoprocOption;
  struct MBOptOp MBOpt;
  struct ISBOptOp ISBOpt;
  struct TSBOptOp TSBOpt;
  struct ITMaskOp ITMask;
  struct IFlagsOp IFlags;
  struct MMaskOp MMask;
  struct BankedRegOp BankedReg;
  struct TokOp Tok;
  struct RegOp Reg;
  struct VectorListOp VectorList;
  struct VectorIndexOp VectorIndex;
  struct ImmOp Imm;
  struct MemoryOp Memory;
  struct PostIdxRegOp PostIdxReg;
  struct ShifterImmOp ShifterImm;
  struct RegShiftedRegOp RegShiftedReg;
  struct RegShiftedImmOp RegShiftedImm;
  struct RotImmOp RotImm;
  struct ModImmOp ModImm;
  struct BitfieldOp Bitfield;
};

public:
ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}

/// getStartLoc - Get the location of the first token of this operand.
SMLoc getStartLoc() const override { return StartLoc; }

/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const override { return EndLoc; }

/// getLocRange - Get the range between the first and last token of this
/// operand.
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }

/// getAlignmentLoc - Get the location of the Alignment token of this operand.
SMLoc getAlignmentLoc() const {
  assert(Kind == k_Memory && "Invalid access!");
  return AlignmentLoc;
}

ARMCC::CondCodes getCondCode() const {
  assert(Kind == k_CondCode && "Invalid access!");
  return CC.Val;
}

ARMVCC::VPTCodes getVPTPred() const {
  assert(isVPTPred() && "Invalid access!");
  return VCC.Val;
}

unsigned getCoproc() const {
  assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
  return Cop.Val;
}

StringRef getToken() const {
  assert(Kind == k_Token && "Invalid access!");
  return StringRef(Tok.Data, Tok.Length);
}

unsigned getReg() const override {
  assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
  return Reg.RegNum;
}

const SmallVectorImpl<unsigned> &getRegList() const {
  assert((Kind == k_RegisterList || Kind == k_RegisterListWithAPSR ||
          Kind == k_DPRRegisterList || Kind == k_SPRRegisterList ||
          Kind == k_FPSRegisterListWithVPR ||
          Kind == k_FPDRegisterListWithVPR) &&
         "Invalid access!");
  return Registers;
}

const MCExpr *getImm() const {
  assert(isImm() && "Invalid access!");
  return Imm.Val;
}

const MCExpr *getConstantPoolImm() const {
  assert(isConstantPoolImm() && "Invalid access!");
  return Imm.Val;
}

unsigned getVectorIndex() const {
  assert(Kind == k_VectorIndex && "Invalid access!");
  return VectorIndex.Val;
}

ARM_MB::MemBOpt getMemBarrierOpt() const {
  assert(Kind == k_MemBarrierOpt && "Invalid access!");
  return MBOpt.Val;
}

ARM_ISB::InstSyncBOpt getInstSyncBarrierOpt() const {
  assert(Kind == k_InstSyncBarrierOpt && "Invalid access!");
  return ISBOpt.Val;
}

ARM_TSB::TraceSyncBOpt getTraceSyncBarrierOpt() const {
  assert(Kind == k_TraceSyncBarrierOpt && "Invalid access!");
  return TSBOpt.Val;
}

ARM_PROC::IFlags getProcIFlags() const {
  assert(Kind == k_ProcIFlags && "Invalid access!");
  return IFlags.Val;
}

unsigned getMSRMask() const {
  assert(Kind == k_MSRMask && "Invalid access!");
  return MMask.Val;
}

unsigned getBankedReg() const {
  assert(Kind == k_BankedReg && "Invalid access!");
  return BankedReg.Val;
}

// Simple kind tests used by the generated asm matcher.
bool isCoprocNum() const { return Kind == k_CoprocNum; }
bool isCoprocReg() const { return Kind == k_CoprocReg; }
bool isCoprocOption() const { return Kind == k_CoprocOption; }
bool isCondCode() const { return Kind == k_CondCode; }
bool isVPTPred() const { return Kind == k_VPTPred; }
bool isCCOut() const { return Kind == k_CCOut; }
bool isITMask() const { return Kind == k_ITCondMask; }
bool isITCondCode() const { return Kind == k_CondCode; }
bool isImm() const override {
  return Kind == k_Immediate;
}

// An ARM branch target: any non-constant immediate (resolved later via a
// fixup), or a constant that is 4-byte aligned.
bool isARMBranchTarget() const {
  if (!isImm()) return false;

  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
    return CE->getValue() % 4 == 0;
  return true;
}

// A Thumb branch target: any non-constant immediate, or a constant that is
// 2-byte aligned.
bool isThumbBranchTarget() const {
  if (!isImm()) return false;

  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()))
    return CE->getValue() % 2 == 0;
  return true;
}

// checks whether this operand is an unsigned offset which fits in a field
// of specified width and scaled by a specific number of bits
template<unsigned width, unsigned scale>
bool isUnsignedOffset() const {
  if (!isImm()) return false;
  // Symbol references are accepted unconditionally; they become fixups.
  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    int64_t Align = 1LL << scale;
    int64_t Max = Align * ((1LL << width) - 1);
    return ((Val % Align) == 0) && (Val >= 0) && (Val <= Max);
  }
  return false;
}

// checks whether this operand is a signed offset which fits in a field
// of specified width and scaled by a specific number of bits
template<unsigned width, unsigned scale>
bool isSignedOffset() const {
  if (!isImm()) return false;
  // Symbol references are accepted unconditionally; they become fixups.
  if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
  if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
    int64_t Val = CE->getValue();
    int64_t Align = 1LL << scale;
    int64_t Max = Align * ((1LL << (width-1)) - 1);
    int64_t Min = -Align * (1LL << (width-1));
    return ((Val % Align) == 0) && (Val >= Min) && (Val <= Max);
  }
  return false;
}

// checks whether this operand is a memory operand computed as an offset
// applied to PC. the offset may have 8 bits of magnitude and is represented
// with two bits of shift. textually it may be either [pc, #imm], #imm or
// relocable expression...
bool isThumbMemPC() const {
  int64_t Val = 0;
  if (isImm()) {
    // Symbol references become fixups; accept unconditionally.
    if (isa<MCSymbolRefExpr>(Imm.Val)) return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val);
    if (!CE) return false;
    Val = CE->getValue();
  }
  else if (isMem()) {
    // Must be an immediate offset from PC, with no offset register.
    if(!Memory.OffsetImm || Memory.OffsetRegNum) return false;
    if(Memory.BaseRegNum != ARM::PC) return false;
    Val = Memory.OffsetImm->getValue();
  }
  else return false;
  // Offset must be a non-negative multiple of 4, at most 1020 (8 bits
  // of magnitude, scaled by 4).
  return ((Val % 4) == 0) && (Val >= 0) && (Val <= 1020);
}

bool isFPImm() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  // getFP32Imm returns -1 when the bit pattern has no VFP fp32
  // immediate encoding.
  int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
  return Val != -1;
}

// True iff the operand is a constant immediate in the inclusive range [N, M].
template<int64_t N, int64_t M>
bool isImmediate() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return Value >= N && Value <= M;
}

// As isImmediate<N, M>, but the value must also be a multiple of 4.
template<int64_t N, int64_t M>
bool isImmediateS4() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ((Value & 3) == 0) && Value >= N && Value <= M;
}

bool isFBits16() const {
  return isImmediate<0, 17>();
}
bool isFBits32() const {
  return isImmediate<1, 33>();
}
bool isImm8s4() const {
  return isImmediateS4<-1020, 1020>();
}
bool isImm7s4() const {
  return isImmediateS4<-508, 508>();
}
bool isImm0_1020s4() const {
  return isImmediateS4<0, 1020>();
}
bool isImm0_508s4() const {
  return isImmediateS4<0, 508>();
}
bool isImm0_508s4Neg() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = -CE->getValue();
  // explicitly exclude zero. we want that to use the normal 0_508 version.
  return ((Value & 3) == 0) && Value > 0 && Value <= 508;
}

bool isImm0_4095Neg() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  // isImm0_4095Neg is used with 32-bit immediates only.
  // 32-bit immediates are zero extended to 64-bit when parsed,
  // thus simple -CE->getValue() results in a big negative number,
  // not a small positive number as intended
  if ((CE->getValue() >> 32) > 0) return false;
  uint32_t Value = -static_cast<uint32_t>(CE->getValue());
  return Value > 0 && Value < 4096;
}

bool isImm0_7() const {
  return isImmediate<0, 7>();
}

bool isImm1_16() const {
  return isImmediate<1, 16>();
}

bool isImm1_32() const {
  return isImmediate<1, 32>();
}

bool isImm8_255() const {
  return isImmediate<8, 255>();
}

bool isImm256_65535Expr() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  // If it's not a constant expression, it'll generate a fixup and be
  // handled later.
  if (!CE) return true;
  int64_t Value = CE->getValue();
  return Value >= 256 && Value < 65536;
}

bool isImm0_65535Expr() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  // If it's not a constant expression, it'll generate a fixup and be
  // handled later.
  if (!CE) return true;
  int64_t Value = CE->getValue();
  return Value >= 0 && Value < 65536;
}

bool isImm24bit() const {
  return isImmediate<0, 0xffffff + 1>();
}

bool isImmThumbSR() const {
  return isImmediate<1, 33>();
}

// True iff Value is an 8-bit quantity shifted left by exactly 'shift' bits
// (i.e. the low 'shift' bits are clear and the rest fits in 8 bits).
template<int shift>
bool isExpImmValue(uint64_t Value) const {
  uint64_t mask = (1 << shift) - 1;
  if ((Value & mask) != 0 || (Value >> shift) > 0xff)
    return false;
  return true;
}

template<int shift>
bool isExpImm() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;

  return isExpImmValue<shift>(CE->getValue());
}

// As isExpImm, but tests the 'size'-bit bitwise complement of the value.
template<int shift, int size>
bool isInvertedExpImm() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;

  uint64_t OriginalValue = CE->getValue();
  uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
  return isExpImmValue<shift>(InvertedValue);
}

bool isPKHLSLImm() const {
  return isImmediate<0, 32>();
}

bool isPKHASRImm() const {
  return isImmediate<0, 33>();
}

bool isAdrLabel() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup.
  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;

  // If it is a constant, it must fit into a modified immediate encoding.
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return (ARM_AM::getSOImmVal(Value) != -1 ||
          ARM_AM::getSOImmVal(-Value) != -1);
}

bool isT2SOImm() const {
  // If we have an immediate that's not a constant, treat it as an expression
  // needing a fixup.
  if (isImm() && !isa<MCConstantExpr>(getImm())) {
    // We want to avoid matching :upper16: and :lower16: as we want these
    // expressions to match in isImm0_65535Expr()
    const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(getImm());
    return (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                           ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16));
  }
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ARM_AM::getT2SOImmVal(Value) != -1;
}

// Constant that is not a T2 modified immediate itself, but whose bitwise
// complement is.
bool isT2SOImmNot() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ARM_AM::getT2SOImmVal(Value) == -1 &&
         ARM_AM::getT2SOImmVal(~Value) != -1;
}

// Constant that is not a T2 modified immediate itself, but whose negation is.
bool isT2SOImmNeg() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  // Only use this when not representable as a plain so_imm.
  return ARM_AM::getT2SOImmVal(Value) == -1 &&
         ARM_AM::getT2SOImmVal(-Value) != -1;
}

bool isSetEndImm() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return Value == 1 || Value == 0;
}

bool isReg() const override { return Kind == k_Register; }
bool isRegList() const { return Kind == k_RegisterList; }
bool isRegListWithAPSR() const {
  return Kind == k_RegisterListWithAPSR || Kind == k_RegisterList;
}
bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
bool isFPSRegListWithVPR() const { return Kind == k_FPSRegisterListWithVPR; }
bool isFPDRegListWithVPR() const { return Kind == k_FPDRegisterListWithVPR; }
bool isToken() const override { return Kind == k_Token; }
bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
bool isInstSyncBarrierOpt() const { return Kind == k_InstSyncBarrierOpt; }
bool isTraceSyncBarrierOpt() const { return Kind == k_TraceSyncBarrierOpt; }
bool isMem() const override {
  if (Kind != k_Memory)
    return false;
  // Both base and offset registers (when present) must be GPRs.
  if (Memory.BaseRegNum &&
      !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.BaseRegNum))
    return false;
  if (Memory.OffsetRegNum &&
      !ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Memory.OffsetRegNum))
    return false;
  return true;
}
bool isShifterImm() const { return Kind == k_ShifterImmediate; }
bool isRegShiftedReg() const {
  return Kind == k_ShiftedRegister &&
         ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
             RegShiftedReg.SrcReg) &&
         ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
             RegShiftedReg.ShiftReg);
}
bool isRegShiftedImm() const {
  return Kind == k_ShiftedImmediate &&
         ARMMCRegisterClasses[ARM::GPRRegClassID].contains(
             RegShiftedImm.SrcReg);
}
bool isRotImm() const { return Kind == k_RotateImmediate; }
bool isModImm() const { return Kind == k_ModifiedImmediate; }

// Constant whose bitwise complement has an A32 modified-immediate encoding.
bool isModImmNot() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ARM_AM::getSOImmVal(~Value) != -1;
}

// Constant that is not a modified immediate itself, but whose negation is.
bool isModImmNeg() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Value = CE->getValue();
  return ARM_AM::getSOImmVal(Value) == -1 &&
         ARM_AM::getSOImmVal(-Value) != -1;
}

bool isThumbModImmNeg1_7() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int32_t Value = -(int32_t)CE->getValue();
  return 0 < Value && Value < 8;
}

bool isThumbModImmNeg8_255() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int32_t Value = -(int32_t)CE->getValue();
  return 7 < Value && Value < 256;
}

bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; }
bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
bool isPostIdxRegShifted() const {
  return Kind == k_PostIndexRegister &&
         ARMMCRegisterClasses[ARM::GPRRegClassID].contains(PostIdxReg.RegNum);
}
bool isPostIdxReg() const {
  return isPostIdxRegShifted() && PostIdxReg.ShiftTy == ARM_AM::no_shift;
}
bool isMemNoOffset(bool alignOK = false, unsigned Alignment = 0) const {
  if (!isMem())
    return false;
  // No offset of any kind.
  return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
         (alignOK || Memory.Alignment == Alignment);
}
bool isMemNoOffsetT2(bool alignOK = false, unsigned Alignment = 0) const {
  if (!isMem())
    return false;

  // T2 form: base register must not be PC.
  if (!ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
          Memory.BaseRegNum))
    return false;

  // No offset of any kind.
  return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
         (alignOK || Memory.Alignment == Alignment);
}
bool isMemNoOffsetT2NoSp(bool alignOK = false, unsigned Alignment = 0) const {
  if (!isMem())
    return false;

  // Base register must be neither PC nor SP (rGPR class).
  if (!ARMMCRegisterClasses[ARM::rGPRRegClassID].contains(
          Memory.BaseRegNum))
    return false;

  // No offset of any kind.
  return Memory.OffsetRegNum == 0 && Memory.OffsetImm == nullptr &&
         (alignOK || Memory.Alignment == Alignment);
}
bool isMemPCRelImm12() const {
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Base register must be PC.
  if (Memory.BaseRegNum != ARM::PC)
    return false;
  // Immediate offset in range [-4095, 4095].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val > -4096 && Val < 4096) ||
         (Val == std::numeric_limits<int32_t>::min());
}

// Alignment-qualified forms of no-offset memory operands, used by NEON
// load/store instructions which allow an optional ":align" suffix.
bool isAlignedMemory() const {
  return isMemNoOffset(true);
}

bool isAlignedMemoryNone() const {
  return isMemNoOffset(false, 0);
}

bool isDupAlignedMemoryNone() const {
  return isMemNoOffset(false, 0);
}

bool isAlignedMemory16() const {
  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
    return true;
  return isMemNoOffset(false, 0);
}

bool isDupAlignedMemory16() const {
  if (isMemNoOffset(false, 2)) // alignment in bytes for 16-bits is 2.
    return true;
  return isMemNoOffset(false, 0);
}

bool isAlignedMemory32() const {
  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
    return true;
  return isMemNoOffset(false, 0);
}

bool isDupAlignedMemory32() const {
  if (isMemNoOffset(false, 4)) // alignment in bytes for 32-bits is 4.
    return true;
  return isMemNoOffset(false, 0);
}

bool isAlignedMemory64() const {
  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
    return true;
  return isMemNoOffset(false, 0);
}

bool isDupAlignedMemory64() const {
  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
    return true;
  return isMemNoOffset(false, 0);
}

bool isAlignedMemory64or128() const {
  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
    return true;
  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
    return true;
  return isMemNoOffset(false, 0);
}

bool isDupAlignedMemory64or128() const {
  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
    return true;
  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
    return true;
  return isMemNoOffset(false, 0);
}

bool isAlignedMemory64or128or256() const {
  if (isMemNoOffset(false, 8)) // alignment in bytes for 64-bits is 8.
    return true;
  if (isMemNoOffset(false, 16)) // alignment in bytes for 128-bits is 16.
    return true;
  if (isMemNoOffset(false, 32)) // alignment in bytes for 256-bits is 32.
    return true;
  return isMemNoOffset(false, 0);
}

bool isAddrMode2() const {
  if (!isMem() || Memory.Alignment != 0) return false;
  // Check for register offset.
  if (Memory.OffsetRegNum) return true;
  // Immediate offset in range [-4095, 4095].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val > -4096 && Val < 4096;
}

bool isAM2OffsetImm() const {
  if (!isImm()) return false;
  // Immediate offset in range [-4095, 4095].
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Val = CE->getValue();
  return (Val == std::numeric_limits<int32_t>::min()) ||
         (Val > -4096 && Val < 4096);
}

bool isAddrMode3() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;
  if (!isMem() || Memory.Alignment != 0) return false;
  // No shifts are legal for AM3.
  if (Memory.ShiftType != ARM_AM::no_shift) return false;
  // Check for register offset.
  if (Memory.OffsetRegNum) return true;
  // Immediate offset in range [-255, 255].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  // The #-0 offset is encoded as std::numeric_limits<int32_t>::min(), and we
  // have to check for this too.
  return (Val > -256 && Val < 256) ||
         Val == std::numeric_limits<int32_t>::min();
}

bool isAM3Offset() const {
  if (isPostIdxReg())
    return true;
  if (!isImm())
    return false;
  // Immediate offset in range [-255, 255].
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Val = CE->getValue();
  // Special case, #-0 is std::numeric_limits<int32_t>::min().
  return (Val > -256 && Val < 256) ||
         Val == std::numeric_limits<int32_t>::min();
}

bool isAddrMode5() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;
  if (!isMem() || Memory.Alignment != 0) return false;
  // Check for register offset.
  if (Memory.OffsetRegNum) return false;
  // Immediate offset in range [-1020, 1020] and a multiple of 4.
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
         Val == std::numeric_limits<int32_t>::min();
}

bool isAddrMode5FP16() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;
  if (!isMem() || Memory.Alignment != 0) return false;
  // Check for register offset.
  if (Memory.OffsetRegNum) return false;
  // Immediate offset in range [-510, 510] and a multiple of 2.
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val >= -510 && Val <= 510 && ((Val & 1) == 0)) ||
         Val == std::numeric_limits<int32_t>::min();
}

// TBB: [Rn, Rm] with no shift, no negation, no alignment.
bool isMemTBB() const {
  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
      Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
    return false;
  return true;
}

// TBH: [Rn, Rm, lsl #1] with no negation, no alignment.
bool isMemTBH() const {
  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
      Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
      Memory.Alignment != 0 )
    return false;
  return true;
}

bool isMemRegOffset() const {
  if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
    return false;
  return true;
}

bool isT2MemRegOffset() const {
  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
      Memory.Alignment != 0 || Memory.BaseRegNum == ARM::PC)
    return false;
  // Only lsl #{0, 1, 2, 3} allowed.
  if (Memory.ShiftType == ARM_AM::no_shift)
    return true;
  if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
    return false;
  return true;
}

bool isMemThumbRR() const {
  // Thumb reg+reg addressing is simple. Just two registers, a base and
  // an offset. No shifts, negations or any other complicating factors.
  if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
      Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
    return false;
  // Both registers must be low registers (r0-r7).
  return isARMLowRegister(Memory.BaseRegNum) &&
         (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
}

bool isMemThumbRIs4() const {
  if (!isMem() || Memory.OffsetRegNum != 0 ||
      !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
    return false;
  // Immediate offset, multiple of 4 in range [0, 124].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val >= 0 && Val <= 124 && (Val % 4) == 0;
}

bool isMemThumbRIs2() const {
  if (!isMem() || Memory.OffsetRegNum != 0 ||
      !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
    return false;
  // Immediate offset, multiple of 2 in range [0, 62].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val >= 0 && Val <= 62 && (Val % 2) == 0;
}

bool isMemThumbRIs1() const {
  if (!isMem() || Memory.OffsetRegNum != 0 ||
      !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
    return false;
  // Immediate offset in range [0, 31].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val >= 0 && Val <= 31;
}

bool isMemThumbSPI() const {
  if (!isMem() || Memory.OffsetRegNum != 0 ||
      Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
    return false;
  // Immediate offset, multiple of 4 in range [0, 1020].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
}

bool isMemImm8s4Offset() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Immediate offset a multiple of 4 in range [-1020, 1020].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  // Special case, #-0 is std::numeric_limits<int32_t>::min().
  return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) ||
         Val == std::numeric_limits<int32_t>::min();
}
bool isMemImm7s4Offset() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.
  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0 ||
      !ARMMCRegisterClasses[ARM::GPRnopcRegClassID].contains(
          Memory.BaseRegNum))
    return false;
  // Immediate offset a multiple of 4 in range [-508, 508].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  // Special case, #-0 is INT32_MIN.
  return (Val >= -508 && Val <= 508 && (Val & 3) == 0) || Val == INT32_MIN;
}
bool isMemImm0_1020s4Offset() const {
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Immediate offset a multiple of 4 in range [0, 1020].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
}

bool isMemImm8Offset() const {
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Base reg of PC isn't allowed for these encodings.
  if (Memory.BaseRegNum == ARM::PC) return false;
  // Immediate offset in range [-255, 255].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val == std::numeric_limits<int32_t>::min()) ||
         (Val > -256 && Val < 256);
}

bool isMemPosImm8Offset() const {
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Immediate offset in range [0, 255].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return Val >= 0 && Val < 256;
}

bool isMemNegImm8Offset() const {
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Base reg of PC isn't allowed for these encodings.
  if (Memory.BaseRegNum == ARM::PC) return false;
  // Immediate offset in range [-255, -1].
  if (!Memory.OffsetImm) return false;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val == std::numeric_limits<int32_t>::min()) ||
         (Val > -256 && Val < 0);
}

bool isMemUImm12Offset() const {
  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Immediate offset in range [0, 4095].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val >= 0 && Val < 4096);
}

bool isMemImm12Offset() const {
  // If we have an immediate that's not a constant, treat it as a label
  // reference needing a fixup. If it is a constant, it's something else
  // and we reject it.

  if (isImm() && !isa<MCConstantExpr>(getImm()))
    return true;

  if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
    return false;
  // Immediate offset in range [-4095, 4095].
  if (!Memory.OffsetImm) return true;
  int64_t Val = Memory.OffsetImm->getValue();
  return (Val > -4096 && Val < 4096) ||
         (Val == std::numeric_limits<int32_t>::min());
}

bool isConstPoolAsmImm() const {
  // Delay processing of Constant Pool Immediate, this will turn into
  // a constant. Match no other operand
  return (isConstantPoolImm());
}

bool isPostIdxImm8() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Val = CE->getValue();
  // #-0 is encoded as std::numeric_limits<int32_t>::min().
  return (Val > -256 && Val < 256) ||
         (Val == std::numeric_limits<int32_t>::min());
}

bool isPostIdxImm8s4() const {
  if (!isImm()) return false;
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
  if (!CE) return false;
  int64_t Val = CE->getValue();
  // Multiple of 4 in [-1020, 1020]; #-0 is
  // std::numeric_limits<int32_t>::min().
  return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
         (Val == std::numeric_limits<int32_t>::min());
}

bool isMSRMask() const { return Kind == k_MSRMask; }
bool isBankedReg() const { return Kind == k_BankedReg; }
bool isProcIFlags() const { return Kind == k_ProcIFlags; }

// NEON operands.
bool isSingleSpacedVectorList() const {
  return Kind == k_VectorList && !VectorList.isDoubleSpaced;
}

bool isDoubleSpacedVectorList() const {
  return Kind == k_VectorList && VectorList.isDoubleSpaced;
}

bool isVecListOneD() const {
  if (!isSingleSpacedVectorList()) return false;
  return VectorList.Count == 1;
}

bool isVecListDPair() const {
  if (!isSingleSpacedVectorList()) return false;
  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
}

bool isVecListThreeD() const {
  if (!isSingleSpacedVectorList()) return false;
  return VectorList.Count == 3;
}

bool isVecListFourD() const {
  if (!isSingleSpacedVectorList()) return false;
  return VectorList.Count == 4;
}

bool isVecListDPairSpaced() const {
  if (Kind != k_VectorList) return false;
  if (isSingleSpacedVectorList()) return false;
  return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
              .contains(VectorList.RegNum));
}

bool isVecListThreeQ() const {
  if (!isDoubleSpacedVectorList()) return false;
  return VectorList.Count == 3;
}

bool isVecListFourQ() const {
  if (!isDoubleSpacedVectorList()) return false;
  return VectorList.Count == 4;
}

// "All lanes" list forms, e.g. {d0[], d1[]}.
bool isSingleSpacedVectorAllLanes() const {
  return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
}

bool isDoubleSpacedVectorAllLanes() const {
  return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
}

bool isVecListOneDAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 1;
}

bool isVecListDPairAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return (ARMMCRegisterClasses[ARM::DPairRegClassID]
              .contains(VectorList.RegNum));
}

bool isVecListDPairSpacedAllLanes() const {
  if (!isDoubleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 2;
}

bool isVecListThreeDAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 3;
}

bool isVecListThreeQAllLanes() const {
  if (!isDoubleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 3;
}

bool isVecListFourDAllLanes() const {
  if (!isSingleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 4;
}

bool isVecListFourQAllLanes() const {
  if (!isDoubleSpacedVectorAllLanes()) return false;
  return VectorList.Count == 4;
}

// Lane-indexed list forms, e.g. {d0[1], d1[1]}. The lane bound depends on
// the element size (7 for bytes, 3 for halfwords, 1 for words).
bool isSingleSpacedVectorIndexed() const {
  return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
}

bool isDoubleSpacedVectorIndexed() const {
  return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
}

bool isVecListOneDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
}

bool isVecListOneDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
}

bool isVecListOneDWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
}

bool isVecListTwoDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
}

bool isVecListTwoDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}

bool isVecListTwoQWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}

bool isVecListTwoQHWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
}

bool isVecListTwoDWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
}

bool isVecListThreeDByteIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
}

bool isVecListThreeDHWordIndexed() const {
  if (!isSingleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}

bool isVecListThreeQWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
}

bool isVecListThreeQHWordIndexed() const {
  if (!isDoubleSpacedVectorIndexed()) return false;
  return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
}

bool isVecListThreeDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
  }

  bool isVecListFourDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVecListFourQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
  }

  bool isVecListFourDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
  }

  bool isVectorIndex() const { return Kind == k_VectorIndex; }

  // Checks that a vector lane index is strictly below NumLanes; the
  // instantiations below pick NumLanes per element size (8 byte lanes,
  // 4 halfword lanes, 2 word lanes, 1 doubleword lane).
  template <unsigned NumLanes>
  bool isVectorIndexInRange() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < NumLanes;
  }

  bool isVectorIndex8()  const { return isVectorIndexInRange<8>(); }
  bool isVectorIndex16() const { return isVectorIndexInRange<4>(); }
  bool isVectorIndex32() const { return isVectorIndexInRange<2>(); }
  bool isVectorIndex64() const { return isVectorIndexInRange<1>(); }

  bool isNEONi8splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (isNEONByteReplicate(2))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi16splat(Value);
  }

  // Same as above but for the bitwise inverse of the immediate (VBIC/VORN
  // style forms take the complemented constant).
  bool isNEONi16splatNot() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi16splat(~Value & 0xffff);
  }

  bool isNEONi32splat() const {
    if (isNEONByteReplicate(4))
      return false; // Leave that for bytes replication and forbid by default.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi32splat(Value);
  }

  bool isNEONi32splatNot() const {
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    unsigned Value = CE->getValue();
    return ARM_AM::isNEONi32splat(~Value);
  }

  static bool isValidNEONi32vmovImm(int64_t Value) {
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
return ((Value & 0xffffffffffffff00) == 0) ||
           ((Value & 0xffffffffffff00ff) == 0) ||
           ((Value & 0xffffffffff00ffff) == 0) ||
           ((Value & 0xffffffff00ffffff) == 0) ||
           ((Value & 0xffffffffffff00ff) == 0xff) ||
           ((Value & 0xffffffffff00ffff) == 0xffff);
  }

  /// True when the immediate (or its inverse, if \p Inv) consists of
  /// \p NumElems identical elements of \p Width bits.
  bool isNEONReplicate(unsigned Width, unsigned NumElems, bool Inv) const {
    assert((Width == 8 || Width == 16 || Width == 32) &&
           "Invalid element width");
    assert(NumElems * Width <= 64 && "Invalid result width");

    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    int64_t Value = CE->getValue();
    if (!Value)
      return false; // Don't bother with zero.
    if (Inv)
      Value = ~Value;

    uint64_t Mask = (1ull << Width) - 1;
    uint64_t Elem = Value & Mask;
    // A 16-bit element may set bits in only one of its two bytes; a 32-bit
    // element must satisfy the VMOV-immediate byte patterns above.
    if (Width == 16 && (Elem & 0x00ff) != 0 && (Elem & 0xff00) != 0)
      return false;
    if (Width == 32 && !isValidNEONi32vmovImm(Elem))
      return false;

    // Every remaining element must repeat the low element exactly.
    for (unsigned i = 1; i < NumElems; ++i) {
      Value >>= Width;
      if ((Value & Mask) != Elem)
        return false;
    }
    return true;
  }

  bool isNEONByteReplicate(unsigned NumBytes) const {
    return isNEONReplicate(8, NumBytes, false);
  }

  static void checkNeonReplicateArgs(unsigned FromW, unsigned ToW) {
    assert((FromW == 8 || FromW == 16 || FromW == 32) &&
           "Invalid source width");
    assert((ToW == 16 || ToW == 32 || ToW == 64) &&
           "Invalid destination width");
    assert(FromW < ToW && "ToW is not less than FromW");
  }

  template<unsigned FromW, unsigned ToW>
  bool isNEONmovReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    // A full 64-bit splat is left to the dedicated i64-splat form.
    if (ToW == 64 && isNEONi64splat())
      return false;
    return isNEONReplicate(FromW, ToW / FromW, false);
  }

  template<unsigned FromW, unsigned ToW>
  bool isNEONinvReplicate() const {
    checkNeonReplicateArgs(FromW, ToW);
    return isNEONReplicate(FromW, ToW / FromW, true);
  }

  bool isNEONi32vmov() const {
    if (isNEONByteReplicate(4))
      return false; // Let it to be classified as byte-replicate case.
    if (!isImm())
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE)
      return false;
    return isValidNEONi32vmovImm(CE->getValue());
  }

  // As isNEONi32vmov, but for the bitwise inverse of the immediate.
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    return isValidNEONi32vmovImm(~CE->getValue());
  }

  bool isNEONi64splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    // i64 value with each byte being either 0 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
    return true;
  }

  /// Valid rotation immediate: a constant that is Remainder modulo Angle
  /// and no larger than 270.
  template<int64_t Angle, int64_t Remainder>
  bool isComplexRotation() const {
    if (!isImm()) return false;

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    uint64_t Value = CE->getValue();

    return (Value % Angle == Remainder && Value <= 270);
  }

  /// MVE long-shift amount: a constant in the range [1, 32].
  bool isMVELongShift() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
if (!CE) return false;
    uint64_t Value = CE->getValue();
    return Value >= 1 && Value <= 32;
  }

  /// Any valid IT condition code other than AL (always).
  bool isITCondCodeNoAL() const {
    if (!isITCondCode()) return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC != ARMCC::AL;
  }

  /// Restricted set: equality comparisons only (EQ, NE).
  bool isITCondCodeRestrictedI() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::EQ || CC == ARMCC::NE;
  }

  /// Restricted set: signed comparisons (LT, GT, LE, GE).
  bool isITCondCodeRestrictedS() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::LT || CC == ARMCC::GT || CC == ARMCC::LE ||
           CC == ARMCC::GE;
  }

  /// Restricted set: unsigned comparisons (HS, HI).
  bool isITCondCodeRestrictedU() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::HS || CC == ARMCC::HI;
  }

  /// Restricted set: EQ, NE, LT, GT, LE, GE.
  bool isITCondCodeRestrictedFP() const {
    if (!isITCondCode())
      return false;
    ARMCC::CondCodes CC = getCondCode();
    return CC == ARMCC::EQ || CC == ARMCC::NE || CC == ARMCC::LT ||
           CC == ARMCC::GT || CC == ARMCC::LE || CC == ARMCC::GE;
  }

  /// Appends Expr to Inst, folding constant expressions to immediate
  /// operands and leaving everything else as an expression operand.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible. Null MCExpr = 0.
    if (!Expr)
      Inst.addOperand(MCOperand::createImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::createImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::createExpr(Expr));
  }

  void addARMBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addThumbBranchTargetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  /// A condition code is two MCInst operands: the code itself and the
  /// CPSR register it reads (no register for the unconditional AL case).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::createReg(RegNum));
  }

  /// A VPT predicate is the predicate value plus the P0 register it reads
  /// (no register when unpredicated).
  void addVPTPredNOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getVPTPred())));
    unsigned RegNum = getVPTPred() == ARMVCC::None ?
0: ARM::P0;
    Inst.addOperand(MCOperand::createReg(RegNum));
  }

  /// vpred_r: a vpred_n predicate plus the register that supplies the
  /// value of inactive lanes.
  void addVPTPredROperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    addVPTPredNOperands(Inst, N-1);
    unsigned RegNum;
    if (getVPTPred() == ARMVCC::None) {
      RegNum = 0;
    } else {
      // When predicated, the inactive-lane source is tied to one of the
      // instruction's output operands; fetch the register from there.
      unsigned NextOpIndex = Inst.getNumOperands();
      const MCInstrDesc &MCID = ARMInsts[Inst.getOpcode()];
      int TiedOp = MCID.getOperandConstraint(NextOpIndex, MCOI::TIED_TO);
      assert(TiedOp >= 0 &&
             "Inactive register in vpred_r is not tied to an output!");
      RegNum = Inst.getOperand(TiedOp).getReg();
    }
    Inst.addOperand(MCOperand::createReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getCondCode())));
  }

  /// Adds the opposite of the held condition code (e.g. EQ becomes NE).
  void addITCondCodeInvOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(ARMCC::getOppositeCondition(getCondCode()))));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  /// Register-shifted register: source reg, shift reg, and a combined
  /// shift-type/amount immediate built by getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non-RegShiftedReg!");
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::createReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::createImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  /// Immediate-shifted register: source reg plus a combined
  /// shift-type/amount immediate.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non-RegShiftedImm!");
    Inst.addOperand(MCOperand::createReg(RegShiftedImm.SrcReg));
    // Shift of #32 is encoded as 0 where permitted
    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ?
0 : RegShiftedImm.ShiftImm);
    Inst.addOperand(MCOperand::createImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
  }

  /// Packs the ASR flag into bit 5 above the shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  /// Emits each register of the list as a separate register operand.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::createReg(*I));
  }

  void addRegListWithAPSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::createReg(*I));
  }

  // The typed register-list forms below all share the plain emission.

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPSRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addFPDRegListWithVPROperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::createImm(RotImm.Imm >> 3));
  }

  /// Modified immediate: 8-bit value with a 4-bit rotation in bits 11:7.
  void addModImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");

    // Support for fixups (MCFixup)
    if (isImm())
      return addImmOperands(Inst, N);

    Inst.addOperand(MCOperand::createImm(ModImm.Bits | (ModImm.Rot << 7)));
  }

  /// The assembly held the bitwise NOT of an encodable value; re-encode it.
  void addModImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }

  /// The assembly held the negation of an encodable value; re-encode it.
  void addModImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Enc = ARM_AM::getSOImmVal(-CE->getValue());
    Inst.addOperand(MCOperand::createImm(Enc));
  }

  void addThumbModImmNeg8_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addThumbModImmNeg1_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint32_t Val = -CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::createImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  /// Fractional-bit count, encoded as 16 minus the written value.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(16 - CE->getValue()));
  }

  /// Fractional-bit count, encoded as 32 minus the written value.
  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(32 - CE->getValue()));
  }

  /// Encodes the bit pattern as an 8-bit FP immediate via getFP32Imm.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm7s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the VSTR/VLDR_VSYSR
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(CE->getValue() / 4)));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::createImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(~(uint32_t)CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually an imm0_4095, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(-(uint32_t)CE->getValue()));
  }

  /// Constant offsets are stored pre-scaled (value >> 2); symbolic
  /// references stay as expressions for a later fixup.
  void addUnsignedOffset_b8s2Operands(MCInst &Inst, unsigned N) const {
    if(const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createImm(CE->getValue() >> 2));
      return;
    }

    const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);
    assert(SR && "Unknown value type!");
    Inst.addOperand(MCOperand::createExpr(SR));
  }

  /// Accepts either an immediate/symbol operand or a memory operand whose
  /// offset is a constant expression.
  void addThumbMemPCOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    if (isImm()) {
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
      if (CE) {
        Inst.addOperand(MCOperand::createImm(CE->getValue()));
        return;
      }

      const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val);

      assert(SR && "Unknown value type!");
      Inst.addOperand(MCOperand::createExpr(SR));
      return;
    }

    assert(isMem() && "Unknown value type!");
    assert(isa<MCConstantExpr>(Memory.OffsetImm) && "Unknown value type!");
    Inst.addOperand(MCOperand::createImm(Memory.OffsetImm->getValue()));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMemBarrierOpt())));
  }

  void addInstSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getInstSyncBarrierOpt())));
  }

  void addTraceSyncBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getTraceSyncBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N
== 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemNoOffsetT2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
  }

  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    int32_t Imm = Memory.OffsetImm->getValue();
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    assert(isImm() && "Not an immediate!");

    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup.
    if (!isa<MCConstantExpr>(getImm())) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      return;
    }

    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::createImm(Val));
  }

  /// Aligned memory: base register plus an alignment immediate.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.Alignment));
  }

  // The size-specific aligned-memory forms all share the generic emission.

  void addDupAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemoryNoneOperands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory16Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }
  void addDupAlignedMemory32Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addDupAlignedMemory64or128Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  void addAlignedMemory64or128or256Operands(MCInst &Inst, unsigned N) const {
    addAlignedMemoryOperands(Inst, N);
  }

  // Addressing mode 2: base register, offset register (0 for the
  // immediate form), and a packed AM2 opc word carrying add/sub,
  // immediate/shift amount and shift type.
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Post-indexed AM2 immediate: sign is folded into the AM2 opc word and
  // the offset-register slot is emitted as 0.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Addressing mode 3: base register, optional offset register, and a
  // packed AM3 opc word.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createReg(0));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Post-indexed AM3 offset: either a register with a direction flag, or
  // (continued in the next chunk) a constant immediate.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Constant offset.
    // (continuation of addAM3OffsetOperands) Constant-offset form: fold
    // the sign into the AM3 opc word; the offset-register slot is 0.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(0));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Addressing mode 5: base register plus an immediate scaled down by 4,
  // with add/sub folded into the AM5 opc word.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // FP16 variant of addressing mode 5: offset scaled by 2 instead of 4.
  void addAddrMode5FP16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // The lower bit is always zero and as such is not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 2 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == std::numeric_limits<int32_t>::min()) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5FP16Opc(AddSub, Val);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Base register + signed immediate (emitted unscaled here).
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemImm7s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::createExpr(getImm()));
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ?
        Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Base register + immediate offset, emitted unscaled.
  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::createImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // This is container for the immediate that we will create the constant
    // pool from
    addExpr(Inst, getConstantPoolImm());
    return;
  }

  // Table-branch (byte) operand: base register plus index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  // Table-branch (halfword) operand: base register plus index register.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  // Register-offset memory operand; direction and shift are packed into
  // one immediate via the AM2 helper (continued in the next chunk).
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ?
                        ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Thumb2 register-offset form: the shift amount is emitted directly.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::createImm(Memory.ShiftImm));
  }

  // Thumb reg+reg memory operand.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createReg(Memory.OffsetRegNum));
  }

  // Thumb reg+imm forms: the offset is emitted pre-scaled by the access
  // size (4, 2 or 1 bytes respectively).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Thumb SP-relative operand: offset scaled by 4.
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::createImm(Val));
  }

  // Post-indexed imm8: magnitude in the low bits, add/sub flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Same as above, but the magnitude is divided by 4 before packing.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == std::numeric_limits<int32_t>::min()) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // Post-indexed register: register plus an add/sub boolean.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::createImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ?
                          ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::createImm(Imm));
  }

  // NOTE(review): dereferences the dyn_cast result without a null check —
  // presumably the operand predicate already guaranteed a constant.
  void addPowerTwoOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue()));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getMSRMask())));
  }

  void addBankedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getBankedReg())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(unsigned(getProcIFlags())));
  }

  // Vector list without a lane: just the list's register.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
  }

  // Vector list plus its lane index.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::createImm(VectorList.LaneIndex));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addVectorIndex64Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Inverted variant: complement within 16 bits before encoding.
  void addNEONi16splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi16splat(~Value & 0xffff);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Inverted variant of the i32 splat encoding.
  void addNEONi32splatNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    Value = ARM_AM::encodeNEONi32splat(~Value);
    Inst.addOperand(MCOperand::createImm(Value));
  }

  // Shared body for the byte-replicate operand forms; Inv selects the
  // complemented (MVN-style) value.
  void addNEONi8ReplicateOperands(MCInst &Inst, bool Inv) const {
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv8i8 ||
            Inst.getOpcode() == ARM::VMOVv16i8) &&
           "All instructions that wants to replicate non-zero byte "
           "always must be replaced with VMOVv8i8 or VMOVv16i8.");
    unsigned Value = CE->getValue();
    if (Inv)
      Value = ~Value;
    unsigned B = Value & 0xff;
    B |= 0xe00; // cmode = 0b1110
    Inst.addOperand(MCOperand::createImm(B));
  }

  void addNEONinvi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, true);
  }

  // Fold a 32-bit VMOV-immediate value together with a selector for which
  // byte of the word carries it (the 0x200/0x400/... bits).
  static unsigned encodeNeonVMOVImmediate(unsigned Value) {
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    return Value;
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONvmovi8ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addNEONi8ReplicateOperands(Inst, false);
  }

  void addNEONvmovi16ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv4i16 ||
            Inst.getOpcode() == ARM::VMOVv8i16 ||
            Inst.getOpcode() == ARM::VMVNv4i16 ||
            Inst.getOpcode() == ARM::VMVNv8i16) &&
           "All instructions that want to replicate non-zero half-word "
           "always must be replaced with V{MOV,MVN}v{4,8}i16.");
    uint64_t Value = CE->getValue();
    unsigned Elem = Value & 0xffff;
    if (Elem >= 256)
      Elem = (Elem >> 8) | 0x200;
    Inst.addOperand(MCOperand::createImm(Elem));
  }

  // Negated i32 VMOV immediate (continued in the next chunk).
  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = encodeNeonVMOVImmediate(~CE->getValue());
    Inst.addOperand(MCOperand::createImm(Value));
  }

  void addNEONvmovi32ReplicateOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert((Inst.getOpcode() == ARM::VMOVv2i32 ||
            Inst.getOpcode() == ARM::VMOVv4i32 ||
            Inst.getOpcode() == ARM::VMVNv2i32 ||
            Inst.getOpcode() == ARM::VMVNv4i32) &&
           "All instructions that want to replicate non-zero word "
           "always must be replaced with V{MOV,MVN}v{2,4}i32.");
    uint64_t Value = CE->getValue();
    unsigned Elem = encodeNeonVMOVImmediate(Value & 0xffffffff);
    Inst.addOperand(MCOperand::createImm(Elem));
  }

  // i64 splat: collect the low bit of each byte of the value into an
  // 8-bit mask, then merge in the 0x1e00 cmode bits.
  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::createImm(Imm | 0x1e00));
  }

  // Complex rotation in degrees, mapped to its encoded index:
  // even form uses value/90, odd form uses (value-90)/180.
  void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm(CE->getValue() / 90));
  }

  void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::createImm((CE->getValue() - 90) / 180));
  }

  void print(raw_ostream &OS) const override;

  // Factory helpers below: each allocates an ARMOperand of the matching
  // kind and records its value plus source range.
  static std::unique_ptr<ARMOperand> CreateITMask(unsigned Mask, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCondCode(ARMCC::CondCodes CC,
                                                    SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateVPTPred(ARMVCC::VPTCodes CC,
                                                   SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_VPTPred);
    Op->VCC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocNum(unsigned CopVal, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocReg(unsigned CopVal, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CoprocReg);
    Op->Cop.Val =
        CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCoprocOption(unsigned Val, SMLoc S,
                                                        SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateCCOut(unsigned RegNum, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  // Stores Str's pointer/length rather than a copy, so the referenced
  // storage must outlive the operand.
  static std::unique_ptr<ARMOperand> CreateToken(StringRef Str, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateReg(unsigned RegNum, SMLoc S,
                                               SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateShiftedRegister(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                        unsigned ShiftReg, unsigned ShiftImm, SMLoc S,
                        SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy, unsigned SrcReg,
                         unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateShifterImm(bool isASR, unsigned Imm,
                                                      SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateRotImm(unsigned Imm, SMLoc S,
                                                  SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateModImm(unsigned Bits, unsigned Rot,
                                                  SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ModifiedImmediate);
    Op->ModImm.Bits = Bits;
    Op->ModImm.Rot = Rot;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Builds a register-list operand; the concrete list kind is derived
  // from the register class of the first element and a possible trailing
  // VPR/APSR (continued in the next chunk).
  static std::unique_ptr<ARMOperand>
  CreateRegList(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    assert(Regs.size() > 0 && "RegList contains no registers?");
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
            Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind = k_FPDRegisterListWithVPR;
      else
        Kind = k_DPRRegisterList;
    } else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                   Regs.front().second)) {
      if (Regs.back().second == ARM::VPR)
        Kind =
            k_FPSRegisterListWithVPR;
      else
        Kind = k_SPRRegisterList;
    }

    // Sort based on the register encoding values.
    array_pod_sort(Regs.begin(), Regs.end());

    if (Kind == k_RegisterList && Regs.back().second == ARM::APSR)
      Kind = k_RegisterListWithAPSR;

    auto Op = make_unique<ARMOperand>(Kind);
    for (SmallVectorImpl<std::pair<unsigned, unsigned>>::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->second);

    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateVectorList(unsigned RegNum,
                                                      unsigned Count,
                                                      bool isDoubleSpaced,
                                                      SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorListAllLanes(unsigned RegNum, unsigned Count, bool isDoubleSpaced,
                           SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_VectorListAllLanes);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateVectorListIndexed(unsigned RegNum, unsigned Count, unsigned Index,
                          bool isDoubleSpaced, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_VectorListIndexed);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->VectorList.LaneIndex = Index;
    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // NOTE(review): Ctx is unused in this body — presumably kept for
  // signature compatibility with call sites; confirm before removing.
  static std::unique_ptr<ARMOperand>
  CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
    auto Op = make_unique<ARMOperand>(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateImm(const MCExpr *Val, SMLoc S,
                                               SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateMem(unsigned BaseRegNum, const MCConstantExpr *OffsetImm,
            unsigned OffsetRegNum, ARM_AM::ShiftOpc ShiftType,
            unsigned ShiftImm, unsigned Alignment, bool isNegative, SMLoc S,
            SMLoc E, SMLoc AlignmentLoc = SMLoc()) {
    auto Op = make_unique<ARMOperand>(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    Op->AlignmentLoc = AlignmentLoc;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreatePostIdxReg(unsigned RegNum, bool isAdd, ARM_AM::ShiftOpc ShiftTy,
                   unsigned ShiftImm, SMLoc S, SMLoc E) {
    auto Op = make_unique<ARMOperand>(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateMemBarrierOpt(ARM_MB::MemBOpt Opt,
                                                         SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateInstSyncBarrierOpt(ARM_ISB::InstSyncBOpt Opt, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_InstSyncBarrierOpt);
    Op->ISBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand>
  CreateTraceSyncBarrierOpt(ARM_TSB::TraceSyncBOpt Opt, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_TraceSyncBarrierOpt);
    Op->TSBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateProcIFlags(ARM_PROC::IFlags IFlags,
                                                      SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateMSRMask(unsigned MMask, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static std::unique_ptr<ARMOperand> CreateBankedReg(unsigned Reg, SMLoc S) {
    auto Op = make_unique<ARMOperand>(k_BankedReg);
    Op->BankedReg.Val = Reg;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.
/// Debug-print this operand to OS in a human-readable, mostly
/// angle-bracketed form. Intended for -debug output and the dump() path;
/// the exact format is not a stable interface.
void ARMOperand::print(raw_ostream &OS) const {
  // Render register 0 (no register) as "noreg" instead of a bogus name.
  auto RegName = [](unsigned Reg) {
    if (Reg)
      return ARMInstPrinter::getRegisterName(Reg);
    else
      return "noreg";
  };

  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_VPTPred:
    OS << "<ARMVCC::" << ARMVPTPredToString(getVPTPred()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << RegName(getReg()) << ">";
    break;
  case k_ITCondMask: {
    // Decode the 4-bit IT mask into its then/else suffix string; index 0 is
    // not a valid encoding (see extractITMaskBit in the file header).
    static const char *const MaskStr[] = {
      "(invalid)", "(tttt)", "(ttt)", "(ttte)",
      "(tt)",      "(ttet)", "(tte)", "(ttee)",
      "(t)",       "(tett)", "(tet)", "(tete)",
      "(te)",      "(teet)", "(tee)", "(teee)",
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_BankedReg:
    OS << "<banked reg: " << getBankedReg() << ">";
    break;
  case k_Immediate:
    OS << *getImm();
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt(), false) << ">";
    break;
  case k_InstSyncBarrierOpt:
    OS << "<ARM_ISB::" << InstSyncBOptToString(getInstSyncBarrierOpt()) << ">";
    break;
  case k_TraceSyncBarrierOpt:
    OS << "<ARM_TSB::" << TraceSyncBOptToString(getTraceSyncBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the populated parts of the addressing mode are printed.
    OS << "<memory";
    if (Memory.BaseRegNum)
      OS << " base:" << RegName(Memory.BaseRegNum);
    if (Memory.OffsetImm)
      OS << " offset-imm:" << *Memory.OffsetImm;
    if (Memory.OffsetRegNum)
      OS << " offset-reg:" << (Memory.isNegative ? "-" : "")
         << RegName(Memory.OffsetRegNum);
    if (Memory.ShiftType != ARM_AM::no_shift) {
      OS << " shift-type:" << ARM_AM::getShiftOpcStr(Memory.ShiftType);
      OS << " shift-imm:" << Memory.ShiftImm;
    }
    if (Memory.Alignment)
      OS << " alignment:" << Memory.Alignment;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): this case emits a closing ">" but no opening "<" —
    // looks like a long-standing cosmetic quirk of the debug format.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << RegName(PostIdxReg.RegNum);
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print the set flags from bit 2 down to bit 0.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << RegName(getReg()) << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg " << RegName(RegShiftedReg.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy) << " "
       << RegName(RegShiftedReg.ShiftReg) << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm " << RegName(RegShiftedImm.SrcReg) << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy) << " #"
       << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // RotImm stores the rotation in units of 8 bits.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_ModifiedImmediate:
    OS << "<mod_imm #" << ModImm.Bits << ", #"
       <<  ModImm.Rot << ")>";
    break;
  case k_ConstantPoolImmediate:
    // NOTE(review): no closing ">" is emitted here — cosmetic quirk.
    OS << "<constant_pool_imm #" << *getConstantPoolImm();
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_RegisterListWithAPSR:
  case k_DPRRegisterList:
  case k_SPRRegisterList:
  case k_FPSRegisterListWithVPR:
  case k_FPDRegisterListWithVPR: {
    OS << "<register_list ";

    // Comma-separate the registers; the iterator is advanced inside the
    // loop body so the separator can be suppressed after the last entry.
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << RegName(*I);
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << RegName(VectorList.RegNum) << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << RegName(VectorList.RegNum) << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// Entry point used by the generic AsmParser to parse a register. Fills in
/// RegNo and the token's source range; returns true on failure (the
/// MCTargetAsmParser convention).
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken &Tok = getParser().getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
int ARMAsmParser::tryParseRegister() {
  MCAsmParser &Parser = getParser();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are matched case-insensitively.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  // Some FPUs only have 16 D registers, so D16-D31 are invalid
  if (!hasD32() && RegNum >= ARM::D16 && RegNum <= ARM::D31)
    return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return -1;

  // Recognize the shift mnemonic; "asl" is accepted as an alias of "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  std::unique_ptr<ARMOperand> PrevOp(
      (ARMOperand *)Operands.pop_back_val().release());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();

  SMLoc EndLoc;
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = nullptr;
      if (getParser().parseExpression(ShiftExpr, EndLoc)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      SMLoc L = Parser.getTok().getLoc();
      EndLoc = Parser.getTok().getEndLoc();
      ShiftReg = tryParseRegister();
      if (ShiftReg == -1) {
        Error(L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error(Parser.getTok().getLoc(),
            "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Register-shifted form only exists for non-RRX; RRX and immediate
  // shifts are encoded as a shifted-immediate operand.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                                         S, EndLoc));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                                          S, EndLoc));

  return 0;
}

/// Try to parse a register name.  The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
bool ARMAsmParser::tryParseRegisterWithWriteBack(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc RegStartLoc = Parser.getTok().getLoc();
  SMLoc RegEndLoc = Parser.getTok().getEndLoc();
  int RegNo = tryParseRegister();
  if (RegNo == -1)
    return true;

  Operands.push_back(ARMOperand::CreateReg(RegNo, RegStartLoc, RegEndLoc));

  // A trailing '!' marks writeback; it is kept as a separate token operand.
  const AsmToken &ExclaimTok = Parser.getTok();
  if (ExclaimTok.is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
                                               ExclaimTok.getLoc()));
    Parser.Lex(); // Eat exclaim token
    return false;
  }

  // Also check for an index operand.
  // This is only legal for vector registers,
  // but that'll get caught OK in operand matching, so we don't need to
  // explicitly filter everything else out here.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    SMLoc SIdx = Parser.getTok().getLoc();
    Parser.Lex(); // Eat left bracket token.

    const MCExpr *ImmVal;
    if (getParser().parseExpression(ImmVal))
      return true;
    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!MCE)
      return TokError("immediate value expected for vector index");

    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");

    SMLoc E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
                                                     SIdx, E,
                                                     getContext()));
  }

  return false;
}

/// MatchCoprocessorOperandName - Try to parse a coprocessor related
/// instruction with a symbolic operand name.
/// We accept "crN" syntax for GAS compatibility.
/// <operand-name> ::= <prefix><number>
/// If CoprocOp is 'c', then:
///   <prefix> ::= c | cr
/// If CoprocOp is 'p', then :
///   <prefix> ::= p
/// <number> ::= integer in range [0, 15]
static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
  // Use the same layout as the tablegen'erated register name matcher. Ugly,
  // but efficient.
  if (Name.size() < 2 || Name[0] != CoprocOp)
    return -1;
  // NOTE(review): the 'r' shortcut is not gated on CoprocOp == 'c', so a
  // name like "pr5" would also be accepted here — confirm this is intended
  // (it mirrors upstream behavior).
  Name = (Name[1] == 'r') ? Name.drop_front(2) : Name.drop_front();

  // What remains must be a decimal number in [0, 15] with no extra chars.
  switch (Name.size()) {
  default: return -1;
  case 1:
    switch (Name[0]) {
    default:  return -1;
    case '0': return 0;
    case '1': return 1;
    case '2': return 2;
    case '3': return 3;
    case '4': return 4;
    case '5': return 5;
    case '6': return 6;
    case '7': return 7;
    case '8': return 8;
    case '9': return 9;
    }
  case 2:
    if (Name[0] != '1')
      return -1;
    switch (Name[1]) {
    default:  return -1;
    // CP10 and CP11 are VFP/NEON and so vector instructions should be used.
    // However, old cores (v5/v6) did use them in that way.
    case '0': return 10;
    case '1': return 11;
    case '2': return 12;
    case '3': return 13;
    case '4': return 14;
    case '5': return 15;
    }
  }
}

/// parseITCondCode - Try to parse a condition code for an IT instruction.
OperandMatchResultTy
ARMAsmParser::parseITCondCode(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Identifier))
    return MatchOperand_NoMatch;
  unsigned CC = ARMCondCodeFromString(Tok.getString());
  if (CC == ~0U)
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the token.

  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));

  return MatchOperand_Success;
}

/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
OperandMatchResultTy
ARMAsmParser::parseCoprocNumOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Num = MatchCoprocessorOperandName(Tok.getString().lower(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;
  // ARMv7 and v8 don't allow cp10/cp11 due to VFP/NEON specific instructions
  if ((hasV7Ops() || hasV8Ops()) && (Num == 10 || Num == 11))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// register, the token is eaten and the operand is added to the operand list.
OperandMatchResultTy
ARMAsmParser::parseCoprocRegOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Reg = MatchCoprocessorOperandName(Tok.getString().lower(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
OperandMatchResultTy
ARMAsmParser::parseCoprocOptionOperand(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  // The option must evaluate to a constant in [0, 255].
  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().parseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}

// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // Walk the canonical R0..R12, SP, LR, PC ordering (wrapping PC -> R0).
  switch(Reg) {
  default: llvm_unreachable("Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

/// Parse a register list.
///
/// The register class of the list is determined by its first register;
/// subsequent registers must belong to the same class (with the APSR/VPR
/// widenings handled below). EnforceOrder controls whether a
/// non-ascending list is an error (false is used for order-independent
/// instructions such as CLRM). Returns true on error.
bool ARMAsmParser::parseRegisterList(OperandVector &Operands,
                                     bool EnforceOrder) {
  MCAsmParser &Parser = getParser();
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return TokError("Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  int EReg = 0;
  // Pairs of (encoding value, register number).
  SmallVector<std::pair<unsigned, unsigned>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    ++Reg;
  }
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  EReg = MRI->getEncodingValue(Reg);
  Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax: "Ra-Rb" expands to every register in between.
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(AfterMinusLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(AfterMinusLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
        return Error(AfterMinusLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        EReg = MRI->getEncodingValue(Reg);
        Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    if (!RC->contains(Reg) &&
        RC->getID() == ARMMCRegisterClasses[ARM::GPRRegClassID].getID() &&
        ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg)) {
      // switch the register classes, as GPRwithAPSRnospRegClassID is a partial
      // subset of GPRRegClassId except it contains APSR as well.
      RC = &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID];
    }
    if (Reg == ARM::VPR && (RC == &ARMMCRegisterClasses[ARM::SPRRegClassID] ||
                            RC == &ARMMCRegisterClasses[ARM::DPRRegClassID])) {
      // VPR may be appended to an S- or D-register list; widen the class.
      RC = &ARMMCRegisterClasses[ARM::FPWithVPRRegClassID];
      EReg = MRI->getEncodingValue(Reg);
      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
      continue;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // In most cases, the list must be monotonically increasing. An
    // exception is CLRM, which is order-independent anyway, so
    // there's no potential for confusion if you write clrm {r2,r1}
    // instead of clrm {r1,r2}.
    if (EnforceOrder &&
        MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
        Warning(RegLoc, "register list not in ascending order");
      else if (!ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(Reg))
        return Error(RegLoc, "register list not in ascending order");
    }
    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        RC != &ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    EReg = MRI->getEncodingValue(Reg);
    Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    if (isQReg) {
      // Add the second D sub-register of the Q register.
      EReg = MRI->getEncodingValue(++Reg);
      Registers.push_back(std::pair<unsigned, unsigned>(EReg, Reg));
    }
  }

  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(Parser.getTok().getLoc(), "'}' expected");
  SMLoc E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}

// Helper function to parse the lane index for vector lists.
/// Parse an optional lane specifier after a vector register: "[]" selects
/// all lanes, "[n]" a single lane, and absence of '[' means no lanes.
/// On success LaneKind (and Index for IndexedLane) are set and EndLoc is
/// updated past the closing ']'.
OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
  MCAsmParser &Parser = getParser();
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      EndLoc = Parser.getTok().getEndLoc();
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }

    // There's an optional '#' token here. Normally there wouldn't be, but
    // inline assembly puts one in, and it's friendly to accept that.
    // NOTE(review): only Hash is checked here despite the "'#' or '$'"
    // wording below — confirm whether Dollar should also be accepted.
    if (Parser.getTok().is(AsmToken::Hash))
      Parser.Lex(); // Eat '#' or '$'.

    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().parseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    EndLoc = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    // NOTE(review): this diagnostic uses the location of the token *after*
    // the already-consumed ']', so it points past the lane index.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  LaneKind = NoLanes;
  return MatchOperand_Success;
}

// parse a vector register list
OperandMatchResultTy
ARMAsmParser::parseVectorList(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    SMLoc E = Parser.getTok().getEndLoc();
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        // Represent the Q register as the covering D-register pair.
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
                                  &ARMMCRegisterClasses[ARM::DPairRegClassID]);
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }

  SMLoc E;
  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax ("Da-Db"); only legal in single-spaced lists.
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(AfterMinusLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(AfterMinusLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(AfterMinusLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(AfterMinusLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc LaneLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
          MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(LaneLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
4424 VectorLaneTy NextLaneKind; 4425 unsigned NextLaneIndex; 4426 SMLoc EndLoc = Parser.getTok().getLoc(); 4427 if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success) 4428 return MatchOperand_ParseFail; 4429 if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) { 4430 Error(EndLoc, "mismatched lane index in register list"); 4431 return MatchOperand_ParseFail; 4432 } 4433 } 4434 4435 if (Parser.getTok().isNot(AsmToken::RCurly)) { 4436 Error(Parser.getTok().getLoc(), "'}' expected"); 4437 return MatchOperand_ParseFail; 4438 } 4439 E = Parser.getTok().getEndLoc(); 4440 Parser.Lex(); // Eat '}' token. 4441 4442 switch (LaneKind) { 4443 case NoLanes: 4444 // Two-register operands have been converted to the 4445 // composite register classes. 4446 if (Count == 2) { 4447 const MCRegisterClass *RC = (Spacing == 1) ? 4448 &ARMMCRegisterClasses[ARM::DPairRegClassID] : 4449 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID]; 4450 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC); 4451 } 4452 Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, 4453 (Spacing == 2), S, E)); 4454 break; 4455 case AllLanes: 4456 // Two-register operands have been converted to the 4457 // composite register classes. 4458 if (Count == 2) { 4459 const MCRegisterClass *RC = (Spacing == 1) ? 4460 &ARMMCRegisterClasses[ARM::DPairRegClassID] : 4461 &ARMMCRegisterClasses[ARM::DPairSpcRegClassID]; 4462 FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC); 4463 } 4464 Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count, 4465 (Spacing == 2), 4466 S, E)); 4467 break; 4468 case IndexedLane: 4469 Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count, 4470 LaneIndex, 4471 (Spacing == 2), 4472 S, E)); 4473 break; 4474 } 4475 return MatchOperand_Success; 4476 } 4477 4478 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options. 
4479 OperandMatchResultTy 4480 ARMAsmParser::parseMemBarrierOptOperand(OperandVector &Operands) { 4481 MCAsmParser &Parser = getParser(); 4482 SMLoc S = Parser.getTok().getLoc(); 4483 const AsmToken &Tok = Parser.getTok(); 4484 unsigned Opt; 4485 4486 if (Tok.is(AsmToken::Identifier)) { 4487 StringRef OptStr = Tok.getString(); 4488 4489 Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower()) 4490 .Case("sy", ARM_MB::SY) 4491 .Case("st", ARM_MB::ST) 4492 .Case("ld", ARM_MB::LD) 4493 .Case("sh", ARM_MB::ISH) 4494 .Case("ish", ARM_MB::ISH) 4495 .Case("shst", ARM_MB::ISHST) 4496 .Case("ishst", ARM_MB::ISHST) 4497 .Case("ishld", ARM_MB::ISHLD) 4498 .Case("nsh", ARM_MB::NSH) 4499 .Case("un", ARM_MB::NSH) 4500 .Case("nshst", ARM_MB::NSHST) 4501 .Case("nshld", ARM_MB::NSHLD) 4502 .Case("unst", ARM_MB::NSHST) 4503 .Case("osh", ARM_MB::OSH) 4504 .Case("oshst", ARM_MB::OSHST) 4505 .Case("oshld", ARM_MB::OSHLD) 4506 .Default(~0U); 4507 4508 // ishld, oshld, nshld and ld are only available from ARMv8. 4509 if (!hasV8Ops() && (Opt == ARM_MB::ISHLD || Opt == ARM_MB::OSHLD || 4510 Opt == ARM_MB::NSHLD || Opt == ARM_MB::LD)) 4511 Opt = ~0U; 4512 4513 if (Opt == ~0U) 4514 return MatchOperand_NoMatch; 4515 4516 Parser.Lex(); // Eat identifier token. 4517 } else if (Tok.is(AsmToken::Hash) || 4518 Tok.is(AsmToken::Dollar) || 4519 Tok.is(AsmToken::Integer)) { 4520 if (Parser.getTok().isNot(AsmToken::Integer)) 4521 Parser.Lex(); // Eat '#' or '$'. 
4522 SMLoc Loc = Parser.getTok().getLoc(); 4523 4524 const MCExpr *MemBarrierID; 4525 if (getParser().parseExpression(MemBarrierID)) { 4526 Error(Loc, "illegal expression"); 4527 return MatchOperand_ParseFail; 4528 } 4529 4530 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID); 4531 if (!CE) { 4532 Error(Loc, "constant expression expected"); 4533 return MatchOperand_ParseFail; 4534 } 4535 4536 int Val = CE->getValue(); 4537 if (Val & ~0xf) { 4538 Error(Loc, "immediate value out of range"); 4539 return MatchOperand_ParseFail; 4540 } 4541 4542 Opt = ARM_MB::RESERVED_0 + Val; 4543 } else 4544 return MatchOperand_ParseFail; 4545 4546 Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S)); 4547 return MatchOperand_Success; 4548 } 4549 4550 OperandMatchResultTy 4551 ARMAsmParser::parseTraceSyncBarrierOptOperand(OperandVector &Operands) { 4552 MCAsmParser &Parser = getParser(); 4553 SMLoc S = Parser.getTok().getLoc(); 4554 const AsmToken &Tok = Parser.getTok(); 4555 4556 if (Tok.isNot(AsmToken::Identifier)) 4557 return MatchOperand_NoMatch; 4558 4559 if (!Tok.getString().equals_lower("csync")) 4560 return MatchOperand_NoMatch; 4561 4562 Parser.Lex(); // Eat identifier token. 4563 4564 Operands.push_back(ARMOperand::CreateTraceSyncBarrierOpt(ARM_TSB::CSYNC, S)); 4565 return MatchOperand_Success; 4566 } 4567 4568 /// parseInstSyncBarrierOptOperand - Try to parse ISB inst sync barrier options. 4569 OperandMatchResultTy 4570 ARMAsmParser::parseInstSyncBarrierOptOperand(OperandVector &Operands) { 4571 MCAsmParser &Parser = getParser(); 4572 SMLoc S = Parser.getTok().getLoc(); 4573 const AsmToken &Tok = Parser.getTok(); 4574 unsigned Opt; 4575 4576 if (Tok.is(AsmToken::Identifier)) { 4577 StringRef OptStr = Tok.getString(); 4578 4579 if (OptStr.equals_lower("sy")) 4580 Opt = ARM_ISB::SY; 4581 else 4582 return MatchOperand_NoMatch; 4583 4584 Parser.Lex(); // Eat identifier token. 
4585 } else if (Tok.is(AsmToken::Hash) || 4586 Tok.is(AsmToken::Dollar) || 4587 Tok.is(AsmToken::Integer)) { 4588 if (Parser.getTok().isNot(AsmToken::Integer)) 4589 Parser.Lex(); // Eat '#' or '$'. 4590 SMLoc Loc = Parser.getTok().getLoc(); 4591 4592 const MCExpr *ISBarrierID; 4593 if (getParser().parseExpression(ISBarrierID)) { 4594 Error(Loc, "illegal expression"); 4595 return MatchOperand_ParseFail; 4596 } 4597 4598 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ISBarrierID); 4599 if (!CE) { 4600 Error(Loc, "constant expression expected"); 4601 return MatchOperand_ParseFail; 4602 } 4603 4604 int Val = CE->getValue(); 4605 if (Val & ~0xf) { 4606 Error(Loc, "immediate value out of range"); 4607 return MatchOperand_ParseFail; 4608 } 4609 4610 Opt = ARM_ISB::RESERVED_0 + Val; 4611 } else 4612 return MatchOperand_ParseFail; 4613 4614 Operands.push_back(ARMOperand::CreateInstSyncBarrierOpt( 4615 (ARM_ISB::InstSyncBOpt)Opt, S)); 4616 return MatchOperand_Success; 4617 } 4618 4619 4620 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction. 4621 OperandMatchResultTy 4622 ARMAsmParser::parseProcIFlagsOperand(OperandVector &Operands) { 4623 MCAsmParser &Parser = getParser(); 4624 SMLoc S = Parser.getTok().getLoc(); 4625 const AsmToken &Tok = Parser.getTok(); 4626 if (!Tok.is(AsmToken::Identifier)) 4627 return MatchOperand_NoMatch; 4628 StringRef IFlagsStr = Tok.getString(); 4629 4630 // An iflags string of "none" is interpreted to mean that none of the AIF 4631 // bits are set. Not a terribly useful instruction, but a valid encoding. 
4632 unsigned IFlags = 0; 4633 if (IFlagsStr != "none") { 4634 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 4635 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1).lower()) 4636 .Case("a", ARM_PROC::A) 4637 .Case("i", ARM_PROC::I) 4638 .Case("f", ARM_PROC::F) 4639 .Default(~0U); 4640 4641 // If some specific iflag is already set, it means that some letter is 4642 // present more than once, this is not acceptable. 4643 if (Flag == ~0U || (IFlags & Flag)) 4644 return MatchOperand_NoMatch; 4645 4646 IFlags |= Flag; 4647 } 4648 } 4649 4650 Parser.Lex(); // Eat identifier token. 4651 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 4652 return MatchOperand_Success; 4653 } 4654 4655 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 4656 OperandMatchResultTy 4657 ARMAsmParser::parseMSRMaskOperand(OperandVector &Operands) { 4658 MCAsmParser &Parser = getParser(); 4659 SMLoc S = Parser.getTok().getLoc(); 4660 const AsmToken &Tok = Parser.getTok(); 4661 4662 if (Tok.is(AsmToken::Integer)) { 4663 int64_t Val = Tok.getIntVal(); 4664 if (Val > 255 || Val < 0) { 4665 return MatchOperand_NoMatch; 4666 } 4667 unsigned SYSmvalue = Val & 0xFF; 4668 Parser.Lex(); 4669 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S)); 4670 return MatchOperand_Success; 4671 } 4672 4673 if (!Tok.is(AsmToken::Identifier)) 4674 return MatchOperand_NoMatch; 4675 StringRef Mask = Tok.getString(); 4676 4677 if (isMClass()) { 4678 auto TheReg = ARMSysReg::lookupMClassSysRegByName(Mask.lower()); 4679 if (!TheReg || !TheReg->hasRequiredFeatures(getSTI().getFeatureBits())) 4680 return MatchOperand_NoMatch; 4681 4682 unsigned SYSmvalue = TheReg->Encoding & 0xFFF; 4683 4684 Parser.Lex(); // Eat identifier token. 
4685 Operands.push_back(ARMOperand::CreateMSRMask(SYSmvalue, S)); 4686 return MatchOperand_Success; 4687 } 4688 4689 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 4690 size_t Start = 0, Next = Mask.find('_'); 4691 StringRef Flags = ""; 4692 std::string SpecReg = Mask.slice(Start, Next).lower(); 4693 if (Next != StringRef::npos) 4694 Flags = Mask.slice(Next+1, Mask.size()); 4695 4696 // FlagsVal contains the complete mask: 4697 // 3-0: Mask 4698 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 4699 unsigned FlagsVal = 0; 4700 4701 if (SpecReg == "apsr") { 4702 FlagsVal = StringSwitch<unsigned>(Flags) 4703 .Case("nzcvq", 0x8) // same as CPSR_f 4704 .Case("g", 0x4) // same as CPSR_s 4705 .Case("nzcvqg", 0xc) // same as CPSR_fs 4706 .Default(~0U); 4707 4708 if (FlagsVal == ~0U) { 4709 if (!Flags.empty()) 4710 return MatchOperand_NoMatch; 4711 else 4712 FlagsVal = 8; // No flag 4713 } 4714 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 4715 // cpsr_all is an alias for cpsr_fc, as is plain cpsr. 4716 if (Flags == "all" || Flags == "") 4717 Flags = "fc"; 4718 for (int i = 0, e = Flags.size(); i != e; ++i) { 4719 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 4720 .Case("c", 1) 4721 .Case("x", 2) 4722 .Case("s", 4) 4723 .Case("f", 8) 4724 .Default(~0U); 4725 4726 // If some specific flag is already set, it means that some letter is 4727 // present more than once, this is not acceptable. 4728 if (Flag == ~0U || (FlagsVal & Flag)) 4729 return MatchOperand_NoMatch; 4730 FlagsVal |= Flag; 4731 } 4732 } else // No match for special register. 4733 return MatchOperand_NoMatch; 4734 4735 // Special register without flags is NOT equivalent to "fc" flags. 4736 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 4737 // two lines would enable gas compatibility at the expense of breaking 4738 // round-tripping. 
4739 // 4740 // if (!FlagsVal) 4741 // FlagsVal = 0x9; 4742 4743 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 4744 if (SpecReg == "spsr") 4745 FlagsVal |= 16; 4746 4747 Parser.Lex(); // Eat identifier token. 4748 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 4749 return MatchOperand_Success; 4750 } 4751 4752 /// parseBankedRegOperand - Try to parse a banked register (e.g. "lr_irq") for 4753 /// use in the MRS/MSR instructions added to support virtualization. 4754 OperandMatchResultTy 4755 ARMAsmParser::parseBankedRegOperand(OperandVector &Operands) { 4756 MCAsmParser &Parser = getParser(); 4757 SMLoc S = Parser.getTok().getLoc(); 4758 const AsmToken &Tok = Parser.getTok(); 4759 if (!Tok.is(AsmToken::Identifier)) 4760 return MatchOperand_NoMatch; 4761 StringRef RegName = Tok.getString(); 4762 4763 auto TheReg = ARMBankedReg::lookupBankedRegByName(RegName.lower()); 4764 if (!TheReg) 4765 return MatchOperand_NoMatch; 4766 unsigned Encoding = TheReg->Encoding; 4767 4768 Parser.Lex(); // Eat identifier token. 4769 Operands.push_back(ARMOperand::CreateBankedReg(Encoding, S)); 4770 return MatchOperand_Success; 4771 } 4772 4773 OperandMatchResultTy 4774 ARMAsmParser::parsePKHImm(OperandVector &Operands, StringRef Op, int Low, 4775 int High) { 4776 MCAsmParser &Parser = getParser(); 4777 const AsmToken &Tok = Parser.getTok(); 4778 if (Tok.isNot(AsmToken::Identifier)) { 4779 Error(Parser.getTok().getLoc(), Op + " operand expected."); 4780 return MatchOperand_ParseFail; 4781 } 4782 StringRef ShiftName = Tok.getString(); 4783 std::string LowerOp = Op.lower(); 4784 std::string UpperOp = Op.upper(); 4785 if (ShiftName != LowerOp && ShiftName != UpperOp) { 4786 Error(Parser.getTok().getLoc(), Op + " operand expected."); 4787 return MatchOperand_ParseFail; 4788 } 4789 Parser.Lex(); // Eat shift type token. 4790 4791 // There must be a '#' and a shift amount. 
4792 if (Parser.getTok().isNot(AsmToken::Hash) && 4793 Parser.getTok().isNot(AsmToken::Dollar)) { 4794 Error(Parser.getTok().getLoc(), "'#' expected"); 4795 return MatchOperand_ParseFail; 4796 } 4797 Parser.Lex(); // Eat hash token. 4798 4799 const MCExpr *ShiftAmount; 4800 SMLoc Loc = Parser.getTok().getLoc(); 4801 SMLoc EndLoc; 4802 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 4803 Error(Loc, "illegal expression"); 4804 return MatchOperand_ParseFail; 4805 } 4806 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 4807 if (!CE) { 4808 Error(Loc, "constant expression expected"); 4809 return MatchOperand_ParseFail; 4810 } 4811 int Val = CE->getValue(); 4812 if (Val < Low || Val > High) { 4813 Error(Loc, "immediate value out of range"); 4814 return MatchOperand_ParseFail; 4815 } 4816 4817 Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc)); 4818 4819 return MatchOperand_Success; 4820 } 4821 4822 OperandMatchResultTy 4823 ARMAsmParser::parseSetEndImm(OperandVector &Operands) { 4824 MCAsmParser &Parser = getParser(); 4825 const AsmToken &Tok = Parser.getTok(); 4826 SMLoc S = Tok.getLoc(); 4827 if (Tok.isNot(AsmToken::Identifier)) { 4828 Error(S, "'be' or 'le' operand expected"); 4829 return MatchOperand_ParseFail; 4830 } 4831 int Val = StringSwitch<int>(Tok.getString().lower()) 4832 .Case("be", 1) 4833 .Case("le", 0) 4834 .Default(-1); 4835 Parser.Lex(); // Eat the token. 4836 4837 if (Val == -1) { 4838 Error(S, "'be' or 'le' operand expected"); 4839 return MatchOperand_ParseFail; 4840 } 4841 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::create(Val, 4842 getContext()), 4843 S, Tok.getEndLoc())); 4844 return MatchOperand_Success; 4845 } 4846 4847 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 4848 /// instructions. Legal values are: 4849 /// lsl #n 'n' in [0,31] 4850 /// asr #n 'n' in [1,32] 4851 /// n == 32 encoded as n == 0. 
4852 OperandMatchResultTy 4853 ARMAsmParser::parseShifterImm(OperandVector &Operands) { 4854 MCAsmParser &Parser = getParser(); 4855 const AsmToken &Tok = Parser.getTok(); 4856 SMLoc S = Tok.getLoc(); 4857 if (Tok.isNot(AsmToken::Identifier)) { 4858 Error(S, "shift operator 'asr' or 'lsl' expected"); 4859 return MatchOperand_ParseFail; 4860 } 4861 StringRef ShiftName = Tok.getString(); 4862 bool isASR; 4863 if (ShiftName == "lsl" || ShiftName == "LSL") 4864 isASR = false; 4865 else if (ShiftName == "asr" || ShiftName == "ASR") 4866 isASR = true; 4867 else { 4868 Error(S, "shift operator 'asr' or 'lsl' expected"); 4869 return MatchOperand_ParseFail; 4870 } 4871 Parser.Lex(); // Eat the operator. 4872 4873 // A '#' and a shift amount. 4874 if (Parser.getTok().isNot(AsmToken::Hash) && 4875 Parser.getTok().isNot(AsmToken::Dollar)) { 4876 Error(Parser.getTok().getLoc(), "'#' expected"); 4877 return MatchOperand_ParseFail; 4878 } 4879 Parser.Lex(); // Eat hash token. 4880 SMLoc ExLoc = Parser.getTok().getLoc(); 4881 4882 const MCExpr *ShiftAmount; 4883 SMLoc EndLoc; 4884 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 4885 Error(ExLoc, "malformed shift expression"); 4886 return MatchOperand_ParseFail; 4887 } 4888 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 4889 if (!CE) { 4890 Error(ExLoc, "shift amount must be an immediate"); 4891 return MatchOperand_ParseFail; 4892 } 4893 4894 int64_t Val = CE->getValue(); 4895 if (isASR) { 4896 // Shift amount must be in [1,32] 4897 if (Val < 1 || Val > 32) { 4898 Error(ExLoc, "'asr' shift amount must be in range [1,32]"); 4899 return MatchOperand_ParseFail; 4900 } 4901 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
4902 if (isThumb() && Val == 32) { 4903 Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode"); 4904 return MatchOperand_ParseFail; 4905 } 4906 if (Val == 32) Val = 0; 4907 } else { 4908 // Shift amount must be in [1,32] 4909 if (Val < 0 || Val > 31) { 4910 Error(ExLoc, "'lsr' shift amount must be in range [0,31]"); 4911 return MatchOperand_ParseFail; 4912 } 4913 } 4914 4915 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc)); 4916 4917 return MatchOperand_Success; 4918 } 4919 4920 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 4921 /// of instructions. Legal values are: 4922 /// ror #n 'n' in {0, 8, 16, 24} 4923 OperandMatchResultTy 4924 ARMAsmParser::parseRotImm(OperandVector &Operands) { 4925 MCAsmParser &Parser = getParser(); 4926 const AsmToken &Tok = Parser.getTok(); 4927 SMLoc S = Tok.getLoc(); 4928 if (Tok.isNot(AsmToken::Identifier)) 4929 return MatchOperand_NoMatch; 4930 StringRef ShiftName = Tok.getString(); 4931 if (ShiftName != "ror" && ShiftName != "ROR") 4932 return MatchOperand_NoMatch; 4933 Parser.Lex(); // Eat the operator. 4934 4935 // A '#' and a rotate amount. 4936 if (Parser.getTok().isNot(AsmToken::Hash) && 4937 Parser.getTok().isNot(AsmToken::Dollar)) { 4938 Error(Parser.getTok().getLoc(), "'#' expected"); 4939 return MatchOperand_ParseFail; 4940 } 4941 Parser.Lex(); // Eat hash token. 
4942 SMLoc ExLoc = Parser.getTok().getLoc(); 4943 4944 const MCExpr *ShiftAmount; 4945 SMLoc EndLoc; 4946 if (getParser().parseExpression(ShiftAmount, EndLoc)) { 4947 Error(ExLoc, "malformed rotate expression"); 4948 return MatchOperand_ParseFail; 4949 } 4950 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 4951 if (!CE) { 4952 Error(ExLoc, "rotate amount must be an immediate"); 4953 return MatchOperand_ParseFail; 4954 } 4955 4956 int64_t Val = CE->getValue(); 4957 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 4958 // normally, zero is represented in asm by omitting the rotate operand 4959 // entirely. 4960 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 4961 Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24"); 4962 return MatchOperand_ParseFail; 4963 } 4964 4965 Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc)); 4966 4967 return MatchOperand_Success; 4968 } 4969 4970 OperandMatchResultTy 4971 ARMAsmParser::parseModImm(OperandVector &Operands) { 4972 MCAsmParser &Parser = getParser(); 4973 MCAsmLexer &Lexer = getLexer(); 4974 int64_t Imm1, Imm2; 4975 4976 SMLoc S = Parser.getTok().getLoc(); 4977 4978 // 1) A mod_imm operand can appear in the place of a register name: 4979 // add r0, #mod_imm 4980 // add r0, r0, #mod_imm 4981 // to correctly handle the latter, we bail out as soon as we see an 4982 // identifier. 
4983 // 4984 // 2) Similarly, we do not want to parse into complex operands: 4985 // mov r0, #mod_imm 4986 // mov r0, :lower16:(_foo) 4987 if (Parser.getTok().is(AsmToken::Identifier) || 4988 Parser.getTok().is(AsmToken::Colon)) 4989 return MatchOperand_NoMatch; 4990 4991 // Hash (dollar) is optional as per the ARMARM 4992 if (Parser.getTok().is(AsmToken::Hash) || 4993 Parser.getTok().is(AsmToken::Dollar)) { 4994 // Avoid parsing into complex operands (#:) 4995 if (Lexer.peekTok().is(AsmToken::Colon)) 4996 return MatchOperand_NoMatch; 4997 4998 // Eat the hash (dollar) 4999 Parser.Lex(); 5000 } 5001 5002 SMLoc Sx1, Ex1; 5003 Sx1 = Parser.getTok().getLoc(); 5004 const MCExpr *Imm1Exp; 5005 if (getParser().parseExpression(Imm1Exp, Ex1)) { 5006 Error(Sx1, "malformed expression"); 5007 return MatchOperand_ParseFail; 5008 } 5009 5010 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm1Exp); 5011 5012 if (CE) { 5013 // Immediate must fit within 32-bits 5014 Imm1 = CE->getValue(); 5015 int Enc = ARM_AM::getSOImmVal(Imm1); 5016 if (Enc != -1 && Parser.getTok().is(AsmToken::EndOfStatement)) { 5017 // We have a match! 5018 Operands.push_back(ARMOperand::CreateModImm((Enc & 0xFF), 5019 (Enc & 0xF00) >> 7, 5020 Sx1, Ex1)); 5021 return MatchOperand_Success; 5022 } 5023 5024 // We have parsed an immediate which is not for us, fallback to a plain 5025 // immediate. This can happen for instruction aliases. For an example, 5026 // ARMInstrInfo.td defines the alias [mov <-> mvn] which can transform 5027 // a mov (mvn) with a mod_imm_neg/mod_imm_not operand into the opposite 5028 // instruction with a mod_imm operand. The alias is defined such that the 5029 // parser method is shared, that's why we have to do this here. 
5030 if (Parser.getTok().is(AsmToken::EndOfStatement)) { 5031 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); 5032 return MatchOperand_Success; 5033 } 5034 } else { 5035 // Operands like #(l1 - l2) can only be evaluated at a later stage (via an 5036 // MCFixup). Fallback to a plain immediate. 5037 Operands.push_back(ARMOperand::CreateImm(Imm1Exp, Sx1, Ex1)); 5038 return MatchOperand_Success; 5039 } 5040 5041 // From this point onward, we expect the input to be a (#bits, #rot) pair 5042 if (Parser.getTok().isNot(AsmToken::Comma)) { 5043 Error(Sx1, "expected modified immediate operand: #[0, 255], #even[0-30]"); 5044 return MatchOperand_ParseFail; 5045 } 5046 5047 if (Imm1 & ~0xFF) { 5048 Error(Sx1, "immediate operand must a number in the range [0, 255]"); 5049 return MatchOperand_ParseFail; 5050 } 5051 5052 // Eat the comma 5053 Parser.Lex(); 5054 5055 // Repeat for #rot 5056 SMLoc Sx2, Ex2; 5057 Sx2 = Parser.getTok().getLoc(); 5058 5059 // Eat the optional hash (dollar) 5060 if (Parser.getTok().is(AsmToken::Hash) || 5061 Parser.getTok().is(AsmToken::Dollar)) 5062 Parser.Lex(); 5063 5064 const MCExpr *Imm2Exp; 5065 if (getParser().parseExpression(Imm2Exp, Ex2)) { 5066 Error(Sx2, "malformed expression"); 5067 return MatchOperand_ParseFail; 5068 } 5069 5070 CE = dyn_cast<MCConstantExpr>(Imm2Exp); 5071 5072 if (CE) { 5073 Imm2 = CE->getValue(); 5074 if (!(Imm2 & ~0x1E)) { 5075 // We have a match! 
5076 Operands.push_back(ARMOperand::CreateModImm(Imm1, Imm2, S, Ex2)); 5077 return MatchOperand_Success; 5078 } 5079 Error(Sx2, "immediate operand must an even number in the range [0, 30]"); 5080 return MatchOperand_ParseFail; 5081 } else { 5082 Error(Sx2, "constant expression expected"); 5083 return MatchOperand_ParseFail; 5084 } 5085 } 5086 5087 OperandMatchResultTy 5088 ARMAsmParser::parseBitfield(OperandVector &Operands) { 5089 MCAsmParser &Parser = getParser(); 5090 SMLoc S = Parser.getTok().getLoc(); 5091 // The bitfield descriptor is really two operands, the LSB and the width. 5092 if (Parser.getTok().isNot(AsmToken::Hash) && 5093 Parser.getTok().isNot(AsmToken::Dollar)) { 5094 Error(Parser.getTok().getLoc(), "'#' expected"); 5095 return MatchOperand_ParseFail; 5096 } 5097 Parser.Lex(); // Eat hash token. 5098 5099 const MCExpr *LSBExpr; 5100 SMLoc E = Parser.getTok().getLoc(); 5101 if (getParser().parseExpression(LSBExpr)) { 5102 Error(E, "malformed immediate expression"); 5103 return MatchOperand_ParseFail; 5104 } 5105 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 5106 if (!CE) { 5107 Error(E, "'lsb' operand must be an immediate"); 5108 return MatchOperand_ParseFail; 5109 } 5110 5111 int64_t LSB = CE->getValue(); 5112 // The LSB must be in the range [0,31] 5113 if (LSB < 0 || LSB > 31) { 5114 Error(E, "'lsb' operand must be in the range [0,31]"); 5115 return MatchOperand_ParseFail; 5116 } 5117 E = Parser.getTok().getLoc(); 5118 5119 // Expect another immediate operand. 5120 if (Parser.getTok().isNot(AsmToken::Comma)) { 5121 Error(Parser.getTok().getLoc(), "too few operands"); 5122 return MatchOperand_ParseFail; 5123 } 5124 Parser.Lex(); // Eat hash token. 5125 if (Parser.getTok().isNot(AsmToken::Hash) && 5126 Parser.getTok().isNot(AsmToken::Dollar)) { 5127 Error(Parser.getTok().getLoc(), "'#' expected"); 5128 return MatchOperand_ParseFail; 5129 } 5130 Parser.Lex(); // Eat hash token. 
5131 5132 const MCExpr *WidthExpr; 5133 SMLoc EndLoc; 5134 if (getParser().parseExpression(WidthExpr, EndLoc)) { 5135 Error(E, "malformed immediate expression"); 5136 return MatchOperand_ParseFail; 5137 } 5138 CE = dyn_cast<MCConstantExpr>(WidthExpr); 5139 if (!CE) { 5140 Error(E, "'width' operand must be an immediate"); 5141 return MatchOperand_ParseFail; 5142 } 5143 5144 int64_t Width = CE->getValue(); 5145 // The LSB must be in the range [1,32-lsb] 5146 if (Width < 1 || Width > 32 - LSB) { 5147 Error(E, "'width' operand must be in the range [1,32-lsb]"); 5148 return MatchOperand_ParseFail; 5149 } 5150 5151 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc)); 5152 5153 return MatchOperand_Success; 5154 } 5155 5156 OperandMatchResultTy 5157 ARMAsmParser::parsePostIdxReg(OperandVector &Operands) { 5158 // Check for a post-index addressing register operand. Specifically: 5159 // postidx_reg := '+' register {, shift} 5160 // | '-' register {, shift} 5161 // | register {, shift} 5162 5163 // This method must return MatchOperand_NoMatch without consuming any tokens 5164 // in the case where there is no match, as other alternatives take other 5165 // parse methods. 5166 MCAsmParser &Parser = getParser(); 5167 AsmToken Tok = Parser.getTok(); 5168 SMLoc S = Tok.getLoc(); 5169 bool haveEaten = false; 5170 bool isAdd = true; 5171 if (Tok.is(AsmToken::Plus)) { 5172 Parser.Lex(); // Eat the '+' token. 5173 haveEaten = true; 5174 } else if (Tok.is(AsmToken::Minus)) { 5175 Parser.Lex(); // Eat the '-' token. 
5176 isAdd = false; 5177 haveEaten = true; 5178 } 5179 5180 SMLoc E = Parser.getTok().getEndLoc(); 5181 int Reg = tryParseRegister(); 5182 if (Reg == -1) { 5183 if (!haveEaten) 5184 return MatchOperand_NoMatch; 5185 Error(Parser.getTok().getLoc(), "register expected"); 5186 return MatchOperand_ParseFail; 5187 } 5188 5189 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 5190 unsigned ShiftImm = 0; 5191 if (Parser.getTok().is(AsmToken::Comma)) { 5192 Parser.Lex(); // Eat the ','. 5193 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 5194 return MatchOperand_ParseFail; 5195 5196 // FIXME: Only approximates end...may include intervening whitespace. 5197 E = Parser.getTok().getLoc(); 5198 } 5199 5200 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 5201 ShiftImm, S, E)); 5202 5203 return MatchOperand_Success; 5204 } 5205 5206 OperandMatchResultTy 5207 ARMAsmParser::parseAM3Offset(OperandVector &Operands) { 5208 // Check for a post-index addressing register operand. Specifically: 5209 // am3offset := '+' register 5210 // | '-' register 5211 // | register 5212 // | # imm 5213 // | # + imm 5214 // | # - imm 5215 5216 // This method must return MatchOperand_NoMatch without consuming any tokens 5217 // in the case where there is no match, as other alternatives take other 5218 // parse methods. 5219 MCAsmParser &Parser = getParser(); 5220 AsmToken Tok = Parser.getTok(); 5221 SMLoc S = Tok.getLoc(); 5222 5223 // Do immediates first, as we always parse those if we have a '#'. 5224 if (Parser.getTok().is(AsmToken::Hash) || 5225 Parser.getTok().is(AsmToken::Dollar)) { 5226 Parser.Lex(); // Eat '#' or '$'. 5227 // Explicitly look for a '-', as we need to encode negative zero 5228 // differently. 
5229 bool isNegative = Parser.getTok().is(AsmToken::Minus); 5230 const MCExpr *Offset; 5231 SMLoc E; 5232 if (getParser().parseExpression(Offset, E)) 5233 return MatchOperand_ParseFail; 5234 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 5235 if (!CE) { 5236 Error(S, "constant expression expected"); 5237 return MatchOperand_ParseFail; 5238 } 5239 // Negative zero is encoded as the flag value 5240 // std::numeric_limits<int32_t>::min(). 5241 int32_t Val = CE->getValue(); 5242 if (isNegative && Val == 0) 5243 Val = std::numeric_limits<int32_t>::min(); 5244 5245 Operands.push_back( 5246 ARMOperand::CreateImm(MCConstantExpr::create(Val, getContext()), S, E)); 5247 5248 return MatchOperand_Success; 5249 } 5250 5251 bool haveEaten = false; 5252 bool isAdd = true; 5253 if (Tok.is(AsmToken::Plus)) { 5254 Parser.Lex(); // Eat the '+' token. 5255 haveEaten = true; 5256 } else if (Tok.is(AsmToken::Minus)) { 5257 Parser.Lex(); // Eat the '-' token. 5258 isAdd = false; 5259 haveEaten = true; 5260 } 5261 5262 Tok = Parser.getTok(); 5263 int Reg = tryParseRegister(); 5264 if (Reg == -1) { 5265 if (!haveEaten) 5266 return MatchOperand_NoMatch; 5267 Error(Tok.getLoc(), "register expected"); 5268 return MatchOperand_ParseFail; 5269 } 5270 5271 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 5272 0, S, Tok.getEndLoc())); 5273 5274 return MatchOperand_Success; 5275 } 5276 5277 /// Convert parsed operands to MCInst. Needed here because this instruction 5278 /// only has two register operands, but multiplication is commutative so 5279 /// assemblers should accept both "mul rD, rN, rD" and "mul rD, rD, rN". 5280 void ARMAsmParser::cvtThumbMultiply(MCInst &Inst, 5281 const OperandVector &Operands) { 5282 ((ARMOperand &)*Operands[3]).addRegOperands(Inst, 1); 5283 ((ARMOperand &)*Operands[1]).addCCOutOperands(Inst, 1); 5284 // If we have a three-operand form, make sure to set Rn to be the operand 5285 // that isn't the same as Rd. 
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand &)*Operands[4]).getReg() ==
          ((ARMOperand &)*Operands[3]).getReg())
    RegOp = 5;
  ((ARMOperand &)*Operands[RegOp]).addRegOperands(Inst, 1);
  // The destination register is repeated as the final source operand.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand &)*Operands[2]).addCondCodeOperands(Inst, 2);
}

/// Fix up Thumb branch instructions after parsing: pick conditional vs.
/// unconditional form based on IT-block context, then pick the narrow or
/// wide encoding based on the range of the immediate operand.
void ARMAsmParser::cvtThumbBranches(MCInst &Inst,
                                    const OperandVector &Operands) {
  int CondOp = -1, ImmOp = -1;
  switch(Inst.getOpcode()) {
    case ARM::tB:
    case ARM::tBcc: CondOp = 1; ImmOp = 2; break;

    case ARM::t2B:
    case ARM::t2Bcc: CondOp = 1; ImmOp = 3; break;

    default: llvm_unreachable("Unexpected instruction in cvtThumbBranches");
  }
  // first decide whether or not the branch should be conditional
  // by looking at its location relative to an IT block
  if(inITBlock()) {
    // inside an IT block we cannot have any conditional branches. any
    // such instructions needs to be converted to unconditional form
    switch(Inst.getOpcode()) {
      case ARM::tBcc: Inst.setOpcode(ARM::tB); break;
      case ARM::t2Bcc: Inst.setOpcode(ARM::t2B); break;
    }
  } else {
    // outside IT blocks we can only have unconditional branches with AL
    // condition code or conditional branches with non-AL condition code
    unsigned Cond = static_cast<ARMOperand &>(*Operands[CondOp]).getCondCode();
    switch(Inst.getOpcode()) {
      case ARM::tB:
      case ARM::tBcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::tB : ARM::tBcc);
        break;
      case ARM::t2B:
      case ARM::t2Bcc:
        Inst.setOpcode(Cond == ARMCC::AL ? ARM::t2B : ARM::t2Bcc);
        break;
    }
  }

  // now decide on encoding size based on branch target range
  switch(Inst.getOpcode()) {
    // classify tB as either t2B or t1B based on range of immediate operand
    case ARM::tB: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      // Widen to t2B only if the target is out of the narrow encoding's
      // range and the wide encoding is available.
      if (!op.isSignedOffset<11, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2B);
      break;
    }
    // classify tBcc as either t2Bcc or t1Bcc based on range of immediate operand
    case ARM::tBcc: {
      ARMOperand &op = static_cast<ARMOperand &>(*Operands[ImmOp]);
      if (!op.isSignedOffset<8, 1>() && isThumb() && hasV8MBaseline())
        Inst.setOpcode(ARM::t2Bcc);
      break;
    }
  }
  ((ARMOperand &)*Operands[ImmOp]).addImmOperands(Inst, 1);
  ((ARMOperand &)*Operands[CondOp]).addCondCodeOperands(Inst, 2);
}

/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
bool ARMAsmParser::parseMemory(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;
  if (Parser.getTok().isNot(AsmToken::LBrac))
    return TokError("Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma, a colon or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
      !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Bare '[Rn]' form: no offset at all.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getEndLoc();
    Parser.Lex(); // Eat right bracket token.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, 0, false,
                                             S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
         "Lost colon or comma in memory operand?!");
  if (Tok.is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
  }

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();
    SMLoc AlignmentLoc = Tok.getLoc();

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The specifier is written in bits; ARMOperand stores it in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E, AlignmentLoc));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat '#' or '$'.
    E = Parser.getTok().getLoc();

    // Check for '-' before parsing so '#-0' can be distinguished from '#0'.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().parseExpression(Offset))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as
    // std::numeric_limits<int32_t>::min().
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                  getContext());

    // Now we should have the closing ']'
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(Parser.getTok().getLoc(), "']' expected");
    E = Parser.getTok().getEndLoc();
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(Parser.getTok().getLoc(), "']' expected");
  E = Parser.getTok().getEndLoc();
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, nullptr, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// return true if it parses a shift otherwise it returns false.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  MCAsmParser &Parser = getParser();
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return Error(Loc, "illegal shift operator");
  StringRef ShiftName = Tok.getString();
  // "asl" is accepted as a synonym for "lsl" (gas compatibility).
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    // If <ShiftTy> #0, turn it into a no_shift.
    if (Imm == 0)
      St = ARM_AM::lsl;
    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
    if (Imm == 32)
      Imm = 0;
    Amount = Imm;
  }

  return false;
}

/// parseFPImm - A floating point immediate expression operand.
OperandMatchResultTy
ARMAsmParser::parseFPImm(OperandVector &Operands) {
  MCAsmParser &Parser = getParser();
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular parseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  // Operands[2] is the size-suffix token parsed with the mnemonic.
  ARMOperand &TyOp = static_cast<ARMOperand &>(*Operands[2]);
  bool isVmovf = TyOp.isToken() &&
                 (TyOp.getToken() == ".f32" || TyOp.getToken() == ".f64" ||
                  TyOp.getToken() == ".f16");
  ARMOperand &Mnemonic = static_cast<ARMOperand &>(*Operands[0]);
  bool isFconst = Mnemonic.isToken() && (Mnemonic.getToken() == "fconstd" ||
                                         Mnemonic.getToken() == "fconsts");
  if (!(isVmovf || isFconst))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '#' or '$'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real) && isVmovf) {
    APFloat RealVal(APFloat::IEEEsingle(), Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer) && isFconst) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    // The raw form is the 8-bit encoded value, not an arbitrary integer.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode to the float it represents, then store its bit pattern.
    float RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(RealVal).bitcastToAPInt().getZExtValue();

    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}

/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic.
bool ARMAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  MCAsmParser &Parser = getParser();
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If we've seen a branch mnemonic, the next operand must be a label. This
    // is true even if the label is a register name. So "br r1" means branch to
    // label "r1".
    bool ExpectLabel = Mnemonic == "b" || Mnemonic == "bl";
    if (!ExpectLabel) {
      if (!tryParseRegisterWithWriteBack(Operands))
        return false;
      int Res = tryParseShiftRegister(Operands);
      if (Res == 0) // success
        return false;
      else if (Res == -1) // irrecoverable error
        return true;
      // If this is VMRS, check for the apsr_nzcv operand.
      if (Mnemonic == "vmrs" &&
          Parser.getTok().getString().equals_lower("apsr_nzcv")) {
        S = Parser.getTok().getLoc();
        Parser.Lex();
        Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
        return false;
      }
    }

    // Fall through for the Identifier case that is not a register or a
    // special name.
    LLVM_FALLTHROUGH;
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().parseExpression(IdVal))
      return true;
    // Point E at the last character consumed by the expression parser.
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    // CLRM takes a register list that may include APSR; other list users
    // do not, hence the mnemonic-dependent flag.
    return parseRegisterList(Operands, !Mnemonic.startswith("clr"));
  case AsmToken::Dollar:
  case AsmToken::Hash:
    // #42 -> immediate.
    S = Parser.getTok().getLoc();
    Parser.Lex();

    if (Parser.getTok().isNot(AsmToken::Colon)) {
      bool isNegative = Parser.getTok().is(AsmToken::Minus);
      const MCExpr *ImmVal;
      if (getParser().parseExpression(ImmVal))
        return true;
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
      if (CE) {
        int32_t Val = CE->getValue();
        // '#-0' is distinguished from '#0' by the sentinel INT32_MIN.
        if (isNegative && Val == 0)
          ImmVal = MCConstantExpr::create(std::numeric_limits<int32_t>::min(),
                                          getContext());
      }
      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));

      // There can be a trailing '!' on operands that we want as a separate
      // '!' Token operand. Handle that here. For example, the compatibility
      // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
      if (Parser.getTok().is(AsmToken::Exclaim)) {
        Operands.push_back(ARMOperand::CreateToken(Parser.getTok().getString(),
                                                   Parser.getTok().getLoc()));
        Parser.Lex(); // Eat exclaim token
      }
      return false;
    }
    // w/ a ':' after the '#', it's just like a plain ':'.
    LLVM_FALLTHROUGH;

  case AsmToken::Colon: {
    S = Parser.getTok().getLoc();
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::create(RefKind, SubExprVal,
                                              getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  case AsmToken::Equal: {
    S = Parser.getTok().getLoc();
    if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
      return Error(S, "unexpected token in operand");
    Parser.Lex(); // Eat '='
    const MCExpr *SubExprVal;
    if (getParser().parseExpression(SubExprVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);

    // execute-only: we assume that assembly programmers know what they are
    // doing and allow literal pool creation here
    Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E));
    return false;
  }
  }
}

// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
//  :lower16: and :upper16:.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  MCAsmParser &Parser = getParser();
  RefKind = ARMMCExpr::VK_ARM_None;

  // consume an optional '#' (GNU compatibility)
  if (getLexer().is(AsmToken::Hash))
    Parser.Lex();

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  // Each known prefix carries a bitmask of the object-file formats that can
  // represent the corresponding relocation.
  enum {
    COFF = (1 << MCObjectFileInfo::IsCOFF),
    ELF = (1 << MCObjectFileInfo::IsELF),
    MACHO = (1 << MCObjectFileInfo::IsMachO),
    WASM = (1 << MCObjectFileInfo::IsWasm),
  };
  static const struct PrefixEntry {
    const char *Spelling;
    ARMMCExpr::VariantKind VariantKind;
    uint8_t SupportedFormats;
  } PrefixEntries[] = {
    { "lower16", ARMMCExpr::VK_ARM_LO16, COFF | ELF | MACHO },
    { "upper16", ARMMCExpr::VK_ARM_HI16, COFF | ELF | MACHO },
  };

  StringRef IDVal = Parser.getTok().getIdentifier();

  const auto &Prefix =
      std::find_if(std::begin(PrefixEntries), std::end(PrefixEntries),
                   [&IDVal](const PrefixEntry &PE) {
                      return PE.Spelling == IDVal;
                   });
  if (Prefix == std::end(PrefixEntries)) {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }

  uint8_t CurrentFormat;
  switch (getContext().getObjectFileInfo()->getObjectFileType()) {
  case MCObjectFileInfo::IsMachO:
    CurrentFormat = MACHO;
    break;
  case MCObjectFileInfo::IsELF:
    CurrentFormat = ELF;
    break;
  case MCObjectFileInfo::IsCOFF:
    CurrentFormat = COFF;
    break;
  case MCObjectFileInfo::IsWasm:
    CurrentFormat = WASM;
    break;
  case MCObjectFileInfo::IsXCOFF:
    llvm_unreachable("unexpected object format");
    break;
  }

  // Reject the prefix if the current format cannot encode the relocation.
  if (~Prefix->SupportedFormats & CurrentFormat) {
    Error(Parser.getTok().getLoc(),
          "cannot represent relocation in the current file format");
    return true;
  }

  RefKind = Prefix->VariantKind;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'

  return false;
}

/// Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      StringRef ExtraToken,
                                      unsigned &PredicationCode,
                                      unsigned &VPTPredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  VPTPredicationCode = ARMVCC::None;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vaclt" || Mnemonic == "vacle"  || Mnemonic == "hlt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls" || Mnemonic == "vmaxnm" || Mnemonic == "vminnm" ||
      Mnemonic == "vcvta" || Mnemonic == "vcvtn"  || Mnemonic == "vcvtp" ||
      Mnemonic == "vcvtm" || Mnemonic == "vrinta" || Mnemonic == "vrintn" ||
      Mnemonic == "vrintp" || Mnemonic == "vrintm" || Mnemonic == "hvc" ||
      Mnemonic.startswith("vsel") || Mnemonic == "vins" || Mnemonic == "vmovx" ||
      Mnemonic == "bxns"  || Mnemonic == "blxns" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
      Mnemonic == "wls" || Mnemonic == "le" || Mnemonic == "dls" ||
      Mnemonic == "csel" || Mnemonic == "csinc" ||
      Mnemonic == "csinv" || Mnemonic == "csneg" || Mnemonic == "cinc" ||
      Mnemonic == "cinv" || Mnemonic == "cneg" || Mnemonic == "cset" ||
      Mnemonic == "csetm")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs" &&
      !(hasMVE() &&
        (Mnemonic == "vmine" ||
         Mnemonic == "vshle" || Mnemonic == "vshlt" || Mnemonic == "vshllt" ||
         Mnemonic == "vmvne" || Mnemonic == "vorne" ||
         Mnemonic == "vnege" || Mnemonic == "vnegt" ||
         Mnemonic.startswith("vq")))) {
    // The condition code, if present, is the last two characters.
    unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
        Mnemonic == "vfms" || Mnemonic == "vfnms" || Mnemonic == "fconsts" ||
        Mnemonic == "bxns" || Mnemonic == "blxns" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have an interrupt mode operand which is glued
  // into the mnemonic. Check if this is the case, split it and parse the imod
  // op.
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // Split out a trailing MVE vector-predication suffix (one character),
  // excluding mnemonics whose final 't'/'e' is part of the base name.
  if (isMnemonicVPTPredicable(Mnemonic, ExtraToken) && Mnemonic != "vmovlt" &&
      Mnemonic != "vshllt" && Mnemonic != "vrshrnt" && Mnemonic != "vshrnt" &&
      Mnemonic != "vqrshrunt" && Mnemonic != "vqshrunt" &&
      Mnemonic != "vqrshrnt" && Mnemonic != "vqshrnt") {
    unsigned CC = ARMVectorCondCodeFromString(Mnemonic.substr(Mnemonic.size()-1));
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-1);
      VPTPredicationCode = CC;
    }
    return Mnemonic;
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  // Check "vpst" before "vpt": "vpt" is a prefix of "vpst".
  if (Mnemonic.startswith("vpst")) {
    ITMask = Mnemonic.slice(4, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 4);
  }
  else if (Mnemonic.startswith("vpt")) {
    ITMask = Mnemonic.slice(3, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 3);
  }

  return Mnemonic;
}

/// Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::getMnemonicAcceptInfo(StringRef Mnemonic,
                                         StringRef ExtraToken,
                                         StringRef FullInst,
                                         bool &CanAcceptCarrySet,
                                         bool &CanAcceptPredicationCode,
                                         bool &CanAcceptVPTPredicationCode) {
  CanAcceptVPTPredicationCode = isMnemonicVPTPredicable(Mnemonic, ExtraToken);

  CanAcceptCarrySet =
      Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" || Mnemonic == "mul" ||
      Mnemonic == "bic" || Mnemonic == "asr" || Mnemonic == "orr" ||
      Mnemonic == "mvn" || Mnemonic == "rsb" || Mnemonic == "rsc" ||
      Mnemonic == "orn" || Mnemonic == "sbc" || Mnemonic == "eor" ||
      Mnemonic == "neg" || Mnemonic == "vfm" || Mnemonic == "vfnm" ||
      (!isThumb() &&
       (Mnemonic == "smull" || Mnemonic == "mov" || Mnemonic == "mla" ||
        Mnemonic == "smlal" || Mnemonic == "umlal" || Mnemonic == "umull"));

  if (Mnemonic == "bkpt" || Mnemonic == "cbnz" || Mnemonic == "setend" ||
      Mnemonic == "cps" || Mnemonic == "it" || Mnemonic == "cbz" ||
      Mnemonic == "trap" || Mnemonic == "hlt" || Mnemonic == "udf" ||
      Mnemonic.startswith("crc32") || Mnemonic.startswith("cps") ||
      Mnemonic.startswith("vsel") || Mnemonic == "vmaxnm" ||
      Mnemonic == "vminnm" || Mnemonic == "vcvta" || Mnemonic == "vcvtn" ||
      Mnemonic == "vcvtp" || Mnemonic == "vcvtm" || Mnemonic == "vrinta" ||
      Mnemonic == "vrintn" || Mnemonic == "vrintp" || Mnemonic == "vrintm" ||
      Mnemonic.startswith("aes") || Mnemonic == "hvc" || Mnemonic == "setpan" ||
      Mnemonic.startswith("sha1") || Mnemonic.startswith("sha256") ||
      (FullInst.startswith("vmull") && FullInst.endswith(".p64")) ||
      Mnemonic == "vmovx" || Mnemonic == "vins" ||
      Mnemonic == "vudot" || Mnemonic == "vsdot" ||
      Mnemonic == "vcmla" || Mnemonic == "vcadd" ||
      Mnemonic == "vfmal" || Mnemonic == "vfmsl" ||
      Mnemonic == "sb" || Mnemonic == "ssbb" ||
      Mnemonic == "pssbb" ||
      Mnemonic == "bfcsel" || Mnemonic == "wls" ||
      Mnemonic == "dls" || Mnemonic == "le" || Mnemonic == "csel" ||
      Mnemonic == "csinc" || Mnemonic == "csinv" || Mnemonic == "csneg" ||
      Mnemonic == "cinc" || Mnemonic == "cinv" || Mnemonic == "cneg" ||
      Mnemonic == "cset" || Mnemonic == "csetm" ||
      Mnemonic.startswith("vpt") || Mnemonic.startswith("vpst")) {
    // These mnemonics are never predicable
    CanAcceptPredicationCode = false;
  } else if (!isThumb()) {
    // Some instructions are only predicable in Thumb mode
    CanAcceptPredicationCode =
        Mnemonic != "cdp2" && Mnemonic != "clrex" && Mnemonic != "mcr2" &&
        Mnemonic != "mcrr2" && Mnemonic != "mrc2" && Mnemonic != "mrrc2" &&
        Mnemonic != "dmb" && Mnemonic != "dfb" && Mnemonic != "dsb" &&
        Mnemonic != "isb" && Mnemonic != "pld" && Mnemonic != "pli" &&
        Mnemonic != "pldw" && Mnemonic != "ldc2" && Mnemonic != "ldc2l" &&
        Mnemonic != "stc2" && Mnemonic != "stc2l" &&
        Mnemonic != "tsb" &&
        !Mnemonic.startswith("rfe") && !Mnemonic.startswith("srs");
  } else if (isThumbOne()) {
    if (hasV6MOps())
      CanAcceptPredicationCode = Mnemonic != "movs";
    else
      CanAcceptPredicationCode = Mnemonic != "nop" && Mnemonic != "movs";
  } else
    CanAcceptPredicationCode = true;
}

// Some Thumb instructions have two operand forms that are not
// available as three operand, convert to two operand form if possible.
//
// FIXME: We would really like to be able to tablegen'erate this.
void ARMAsmParser::tryConvertingToTwoOperandForm(StringRef Mnemonic,
                                                 bool CarrySetting,
                                                 OperandVector &Operands) {
  // Only consider the canonical 3-operand shape:
  // [mnemonic, cc_out, cond, Rd, Rn, Rm/#imm].
  if (Operands.size() != 6)
    return;

  const auto &Op3 = static_cast<ARMOperand &>(*Operands[3]);
  auto &Op4 = static_cast<ARMOperand &>(*Operands[4]);
  if (!Op3.isReg() || !Op4.isReg())
    return;

  auto Op3Reg = Op3.getReg();
  auto Op4Reg = Op4.getReg();

  // For most Thumb2 cases we just generate the 3 operand form and reduce
  // it in processInstruction(), but the 3 operand form of ADD (t2ADDrr)
  // won't accept SP or PC so we do the transformation here taking care
  // with immediate range in the 'add sp, sp #imm' case.
  auto &Op5 = static_cast<ARMOperand &>(*Operands[5]);
  if (isThumbTwo()) {
    if (Mnemonic != "add")
      return;
    bool TryTransform = Op3Reg == ARM::PC || Op4Reg == ARM::PC ||
                        (Op5.isReg() && Op5.getReg() == ARM::PC);
    if (!TryTransform) {
      TryTransform = (Op3Reg == ARM::SP || Op4Reg == ARM::SP ||
                      (Op5.isReg() && Op5.getReg() == ARM::SP)) &&
                     !(Op3Reg == ARM::SP && Op4Reg == ARM::SP &&
                       Op5.isImm() && !Op5.isImm0_508s4());
    }
    if (!TryTransform)
      return;
  } else if (!isThumbOne())
    return;

  // Only these data-processing mnemonics have a two-operand Thumb form.
  if (!(Mnemonic == "add" || Mnemonic == "sub" || Mnemonic == "and" ||
        Mnemonic == "eor" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
        Mnemonic == "asr" || Mnemonic == "adc" || Mnemonic == "sbc" ||
        Mnemonic == "ror" || Mnemonic == "orr" || Mnemonic == "bic"))
    return;

  // If first 2 operands of a 3 operand instruction are the same
  // then transform to 2 operand version of the same instruction
  // e.g. 'adds r0, r0, #1' transforms to 'adds r0, #1'
  bool Transform = Op3Reg == Op4Reg;

  // For commutative operations, we might be able to transform if we swap
  // Op4 and Op5. The 'ADD Rdm, SP, Rdm' form is already handled specially
  // as tADDrsp.
  const ARMOperand *LastOp = &Op5;
  bool Swap = false;
  if (!Transform && Op5.isReg() && Op3Reg == Op5.getReg() &&
      ((Mnemonic == "add" && Op4Reg != ARM::SP) ||
       Mnemonic == "and" || Mnemonic == "eor" ||
       Mnemonic == "adc" || Mnemonic == "orr")) {
    Swap = true;
    LastOp = &Op4;
    Transform = true;
  }

  // If both registers are the same then remove one of them from
  // the operand list, with certain exceptions.
  if (Transform) {
    // Don't transform 'adds Rd, Rd, Rm' or 'sub{s} Rd, Rd, Rm' because the
    // 2 operand forms don't exist.
    if (((Mnemonic == "add" && CarrySetting) || Mnemonic == "sub") &&
        LastOp->isReg())
      Transform = false;

    // Don't transform 'add/sub{s} Rd, Rd, #imm' if the immediate fits into
    // 3-bits because the ARMARM says not to.
    if ((Mnemonic == "add" || Mnemonic == "sub") && LastOp->isImm0_7())
      Transform = false;
  }

  if (Transform) {
    if (Swap)
      std::swap(Op4, Op5);
    Operands.erase(Operands.begin() + 3);
  }
}

// Decide whether the defaulted (non-flag-setting) cc_out operand that was
// speculatively added for this mnemonic should be removed again, because the
// instruction variant being matched has no cc_out slot. Returns true if the
// caller should erase Operands[1].
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                                          OperandVector &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand &>(*Operands[4]).isModImm() &&
      static_cast<ARMOperand &>(*Operands[4]).isImm0_65535Expr() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      ((Mnemonic == "add" && static_cast<ARMOperand &>(*Operands[5]).isReg()) ||
       static_cast<ARMOperand &>(*Operands[5]).isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 && static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) &&
        isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) &&
        static_cast<ARMOperand &>(*Operands[5]).isImm0_7())
      return false;
    // Check against T3. If the second register is the PC, this is an
    // alternate form of ADR, which uses encoding T4, so check for that too.
    if (static_cast<ARMOperand &>(*Operands[4]).getReg() != ARM::PC &&
        static_cast<ARMOperand &>(*Operands[5]).isT2SOImm())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[5]).isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[5]).getReg()) ||
       !inITBlock() || (static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[5]).getReg() &&
                        static_cast<ARMOperand &>(*Operands[3]).getReg() !=
                            static_cast<ARMOperand &>(*Operands[4]).getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      // If the registers aren't low regs or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand &>(*Operands[3]).getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand &>(*Operands[4]).getReg()) ||
       !inITBlock()))
    return true;

  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::SP &&
      static_cast<ARMOperand &>(*Operands[1]).getReg() == 0 &&
      (static_cast<ARMOperand &>(*Operands[4]).isImm() ||
       (Operands.size() == 6 &&
        static_cast<ARMOperand &>(*Operands[5]).isImm())))
    return true;

  return false;
}

// Decide whether the scalar condition-code operand should be dropped for
// this mnemonic. Returns true if the caller should erase Operands[1].
bool ARMAsmParser::shouldOmitPredicateOperand(StringRef Mnemonic,
                                              OperandVector &Operands) {
  // VRINT{Z, X} have a predicate operand in VFP, but not in NEON
  unsigned RegIdx = 3;
  if ((Mnemonic == "vrintz" || Mnemonic == "vrintx") &&
      (static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f32" ||
       static_cast<ARMOperand &>(*Operands[2]).getToken() == ".f16")) {
    if (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
        (static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f32" ||
         static_cast<ARMOperand &>(*Operands[3]).getToken() == ".f16"))
      RegIdx = 4;

    // A D- or Q-register operand selects the (unpredicable) NEON variant.
    if (static_cast<ARMOperand &>(*Operands[RegIdx]).isReg() &&
        (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
             static_cast<ARMOperand &>(*Operands[RegIdx]).getReg()) ||
         ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
             static_cast<ARMOperand &>(*Operands[RegIdx]).getReg())))
      return true;
  }
  return false;
}

// Decide whether the vector (VPT) predication operand should be dropped.
// Returns true when the operand list does not select an MVE encoding.
bool ARMAsmParser::shouldOmitVectorPredicateOperand(StringRef Mnemonic,
                                                    OperandVector &Operands) {
  if (!hasMVE() || Operands.size() < 3)
    return true;

  if (Mnemonic.startswith("vmov") &&
      !(Mnemonic.startswith("vmovl") || Mnemonic.startswith("vmovn") ||
        Mnemonic.startswith("vmovx"))) {
    // For plain vmov, a vector index or an S/D register operand means this
    // is not the MVE form, so the vector predicate must be omitted.
    for (auto &Operand : Operands) {
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          ((*Operand).isReg() &&
           (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(
                (*Operand).getReg()) ||
            ARMMCRegisterClasses[ARM::DPRRegClassID].contains(
                (*Operand).getReg())))) {
        return true;
      }
    }
    return false;
  } else {
    for (auto &Operand : Operands) {
      // We check the larger class QPR instead of just the legal class
      // MQPR, to more accurately report errors when using Q registers
      // outside of the allowed range.
      if (static_cast<ARMOperand &>(*Operand).isVectorIndex() ||
          (Operand->isReg() &&
           (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(
               Operand->getReg()))))
        return false;
    }
    return true;
  }
}

// Returns true if Tok is one of the NEON/VFP data-type suffix tokens.
static bool isDataTypeToken(StringRef Tok) {
  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
         Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
         Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
         Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
         Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
         Tok == ".f" || Tok == ".d";
}

// FIXME: This bit should probably be handled via an explicit match class
// in the .td files that matches the suffix instead of having it be
// a literal string token the way it is now.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

// Defined by the tblgen'erated matcher; rewrites Mnemonic in place.
static void applyMnemonicAliases(StringRef &Mnemonic,
                                 const FeatureBitset &Features,
                                 unsigned VariantID);

// The GNU assembler has aliases of ldrd and strd with the second register
// omitted. We don't have a way to do that in tablegen, so fix it up here.
//
// We have to be careful to not emit an invalid Rt2 here, because the rest of
// the assembly parser could then generate confusing diagnostics referring to
// it. If we do find anything that prevents us from doing the transformation we
// bail out, and let the assembly parser report an error on the instruction as
// it is written.
void ARMAsmParser::fixupGNULDRDAlias(StringRef Mnemonic,
                                     OperandVector &Operands) {
  if (Mnemonic != "ldrd" && Mnemonic != "strd")
    return;
  if (Operands.size() < 4)
    return;

  ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[2]);
  ARMOperand &Op3 = static_cast<ARMOperand &>(*Operands[3]);

  if (!Op2.isReg())
    return;
  if (!Op3.isMem())
    return;

  const MCRegisterClass &GPR = MRI->getRegClass(ARM::GPRRegClassID);
  if (!GPR.contains(Op2.getReg()))
    return;

  unsigned RtEncoding = MRI->getEncodingValue(Op2.getReg());
  if (!isThumb() && (RtEncoding & 1)) {
    // In ARM mode, the registers must be from an aligned pair, this
    // restriction does not apply in Thumb mode.
    return;
  }
  if (Op2.getReg() == ARM::PC)
    return;
  unsigned PairedReg = GPR.getRegister(RtEncoding + 1);
  if (!PairedReg || PairedReg == ARM::PC ||
      (PairedReg == ARM::SP && !hasV8Ops()))
    return;

  // Insert the synthesized Rt2 directly after Rt.
  Operands.insert(
      Operands.begin() + 3,
      ARMOperand::CreateReg(PairedReg, Op2.getStartLoc(), Op2.getEndLoc()));
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                                    SMLoc NameLoc, OperandVector &Operands) {
  MCAsmParser &Parser = getParser();

  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffices and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  const FeatureBitset &AvailableFeatures = getAvailableFeatures();
  unsigned AssemblerDialect = getParser().getAssemblerDialect();
  applyMnemonicAliases(Name, AvailableFeatures, AssemblerDialect);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);
  StringRef ExtraToken = Name.slice(Next, Name.find(' ', Next + 1));

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned VPTPredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, ExtraToken, PredicationCode, VPTPredicationCode,
                           CarrySetting, ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the mask for IT and VPT instructions. In ARMOperand and
  // MCOperand, this is stored in a format independent of the
  // condition code: the lowest set bit indicates the end of the
  // encoding, and above that, a 1 bit indicates 'else', and an 0
  // indicates 'then'. E.g.
  //    IT    -> 1000
  //    ITx   -> x100    (ITT -> 0100, ITE -> 1100)
  //    ITxy  -> xy10    (e.g. ITET -> 1010)
  //    ITxyz -> xyz1    (e.g. ITEET -> 1101)
  if (Mnemonic == "it" || Mnemonic.startswith("vpt") ||
      Mnemonic.startswith("vpst")) {
    // Point the diagnostic location at the first mask character.
    SMLoc Loc = Mnemonic == "it" ? SMLoc::getFromPointer(NameLoc.getPointer() + 2) :
                Mnemonic == "vpt" ? SMLoc::getFromPointer(NameLoc.getPointer() + 3) :
                SMLoc::getFromPointer(NameLoc.getPointer() + 4);
    if (ITMask.size() > 3) {
      if (Mnemonic == "it")
        return Error(Loc, "too many conditions on IT instruction");
      return Error(Loc, "too many conditions on VPT instruction");
    }
    unsigned Mask = 8;
    // Walk the mask suffix right-to-left, shifting the terminator bit down
    // and setting an 'else' bit for each 'e'.
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 'e')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode, CanAcceptVPTPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, ExtraToken, Name, CanAcceptCarrySet,
                        CanAcceptPredicationCode, CanAcceptVPTPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                              "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                              "' is not predicable, but condition code specified");
  }

  // If we had a VPT predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptVPTPredicationCode && VPTPredicationCode != ARMVCC::None) {
    return Error(NameLoc, "instruction '" + Mnemonic +
                              "' is not VPT predicable, but VPT code T/E is specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
        ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the VPT predication code operand, if necessary.
  // FIXME: We don't add them for the instructions filtered below as these can
  // have custom operands which need special parsing.  This parsing requires
  // the operand to be in the same place in the OperandVector as their
  // definition in tblgen. Since these instructions may also have the
  // scalar predication operand we do not add the vector one and leave until
  // now to fix it up.
  if (CanAcceptVPTPredicationCode && Mnemonic != "vmov") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateVPTPred(
        ARMVCC::VPTCodes(VPTPredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  } else if (Mnemonic == "cps" && isMClass()) {
    return Error(NameLoc, "instruction 'cps' requires effect for M-class");
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // For ARM mode generate an error if the .n qualifier is used.
    if (ExtraToken == ".n" && !isThumb()) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      return Error(Loc, "instruction with .n (narrow) qualifier not allowed in "
                        "arm mode");
    }

    // The .n qualifier is always discarded as that is what the tables
    // and matcher expect.  In ARM mode the .w qualifier has no effect,
    // so discard it to avoid errors that can be caused by the matcher.
    if (ExtraToken != ".n" && (isThumb() || ExtraToken != ".w")) {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      return true;
    }

    while (parseOptionalToken(AsmToken::Comma)) {
      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        return true;
      }
    }
  }

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
    return true;

  tryConvertingToTwoOperandForm(Mnemonic, CarrySetting, Operands);

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);

  // Some instructions have the same mnemonic, but don't always
  // have a predicate. Distinguish them here and delete the
  // appropriate predicate if needed.  This could be either the scalar
  // predication code or the vector predication code.
  if (PredicationCode == ARMCC::AL &&
      shouldOmitPredicateOperand(Mnemonic, Operands))
    Operands.erase(Operands.begin() + 1);


  if (hasMVE()) {
    if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands) &&
        Mnemonic == "vmov" && PredicationCode == ARMCC::LT) {
      // Very nasty hack to deal with the vector predicated variant of vmovlt
      // the scalar predicated vmov with condition 'lt'.  We can not tell them
      // apart until we have parsed their operands.
      Operands.erase(Operands.begin() + 1);
      Operands.erase(Operands.begin());
      SMLoc MLoc = SMLoc::getFromPointer(NameLoc.getPointer());
      SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                         Mnemonic.size() - 1 + CarrySetting);
      Operands.insert(Operands.begin(),
                      ARMOperand::CreateVPTPred(ARMVCC::None, PLoc));
      Operands.insert(Operands.begin(),
                      ARMOperand::CreateToken(StringRef("vmovlt"), MLoc));
    }
    // For vmov instructions, as mentioned earlier, we did not add the vector
    // predication code, since these may contain operands that require
    // special parsing.  So now we have to see if they require vector
    // predication and replace the scalar one with the vector predication
    // operand if that is the case.
    else if (Mnemonic == "vmov") {
      if (!shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
        Operands.erase(Operands.begin() + 1);
        SMLoc PLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
                                           Mnemonic.size() + CarrySetting);
        Operands.insert(Operands.begin() + 1,
                        ARMOperand::CreateVPTPred(
                            ARMVCC::VPTCodes(VPTPredicationCode), PLoc));
      }
    } else if (CanAcceptVPTPredicationCode) {
      // For all other instructions, make sure only one of the two
      // predication operands is left behind, depending on whether we should
      // use the vector predication.
      if (shouldOmitVectorPredicateOperand(Mnemonic, Operands)) {
        if (CanAcceptPredicationCode)
          Operands.erase(Operands.begin() + 2);
        else
          Operands.erase(Operands.begin() + 1);
      } else if (CanAcceptPredicationCode && PredicationCode == ARMCC::AL) {
        Operands.erase(Operands.begin() + 1);
      }
    }
  }

  if (VPTPredicationCode != ARMVCC::None) {
    bool usedVPTPredicationCode = false;
    for (unsigned I = 1; I < Operands.size(); ++I)
      if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
        usedVPTPredicationCode = true;
    if (!usedVPTPredicationCode) {
      // If we have a VPT predication code and we haven't just turned it
      // into an operand, then it was a mistake for splitMnemonic to
      // separate it from the rest of the mnemonic in the first place,
      // and this may lead to wrong disassembly (e.g. scalar floating
      // point VCMPE is actually a different instruction from VCMP, so
      // we mustn't treat them the same). In that situation, glue it
      // back on.
      Mnemonic = Name.slice(0, Mnemonic.size() + 1);
      Operands.erase(Operands.begin());
      Operands.insert(Operands.begin(),
                      ARMOperand::CreateToken(Mnemonic, NameLoc));
    }
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand &>(*Operands[2]).isImm())
    Operands.erase(Operands.begin() + 1);

  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
  // a single GPRPair reg operand is used in the .td file to replace the two
  // GPRs. However, when parsing from asm, the two GPRs cannot be
  // automatically
  // expressed as a GPRPair, so we have to manually merge them.
  // FIXME: We would really like to be able to tablegen'erate this.
  if (!isThumb() && Operands.size() > 4 &&
      (Mnemonic == "ldrexd" || Mnemonic == "strexd" || Mnemonic == "ldaexd" ||
       Mnemonic == "stlexd")) {
    bool isLoad = (Mnemonic == "ldrexd" || Mnemonic == "ldaexd");
    unsigned Idx = isLoad ? 2 : 3;
    ARMOperand &Op1 = static_cast<ARMOperand &>(*Operands[Idx]);
    ARMOperand &Op2 = static_cast<ARMOperand &>(*Operands[Idx + 1]);

    const MCRegisterClass &MRC = MRI->getRegClass(ARM::GPRRegClassID);
    // Adjust only if Op1 and Op2 are GPRs.
    if (Op1.isReg() && Op2.isReg() && MRC.contains(Op1.getReg()) &&
        MRC.contains(Op2.getReg())) {
      unsigned Reg1 = Op1.getReg();
      unsigned Reg2 = Op2.getReg();
      unsigned Rt = MRI->getEncodingValue(Reg1);
      unsigned Rt2 = MRI->getEncodingValue(Reg2);

      // Rt2 must be Rt + 1 and Rt must be even.
      if (Rt + 1 != Rt2 || (Rt & 1)) {
        return Error(Op2.getStartLoc(),
                     isLoad ? "destination operands must be sequential"
                            : "source operands must be sequential");
      }
      unsigned NewReg = MRI->getMatchingSuperReg(
          Reg1, ARM::gsub_0, &(MRI->getRegClass(ARM::GPRPairRegClassID)));
      Operands[Idx] =
          ARMOperand::CreateReg(NewReg, Op1.getStartLoc(), Op2.getEndLoc());
      Operands.erase(Operands.begin() + Idx + 1);
    }
  }

  // GNU Assembler extension (compatibility).
  fixupGNULDRDAlias(Mnemonic, Operands);

  // FIXME: As said above, this is all a pretty gross hack.  This instruction
  // does not fit with other "subs" and tblgen.
  // Adjust operands of B9.3.19 SUBS PC, LR, #imm (Thumb2) system instruction
  // so the Mnemonic is the original name "subs" and delete the predicate
  // operand so it will match the table entry.
  if (isThumbTwo() && Mnemonic == "sub" && Operands.size() == 6 &&
      static_cast<ARMOperand &>(*Operands[3]).isReg() &&
      static_cast<ARMOperand &>(*Operands[3]).getReg() == ARM::PC &&
      static_cast<ARMOperand &>(*Operands[4]).isReg() &&
      static_cast<ARMOperand &>(*Operands[4]).getReg() == ARM::LR &&
      static_cast<ARMOperand &>(*Operands[5]).isImm()) {
    Operands.front() = ARMOperand::CreateToken(Name, NameLoc);
    Operands.erase(Operands.begin() + 1);
  }
  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(const MCInst &Inst, unsigned OpNo,
                                 unsigned Reg, unsigned HiReg,
                                 bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(const MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo, e = Inst.getNumOperands(); i < e; ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// Return true if instruction has the interesting property of being
// allowed in IT blocks, but not being predicable.
static bool instIsBreakpoint(const MCInst &Inst) {
  return Inst.getOpcode() == ARM::tBKPT ||
         Inst.getOpcode() == ARM::BKPT ||
         Inst.getOpcode() == ARM::tHLT ||
         Inst.getOpcode() == ARM::HLT;
}

// Diagnose illegal register-list contents for an LDM-style instruction whose
// list starts at parsed operand 'ListNo'. SP is rejected unless 'IsARPop'
// (A-profile POP allows it); PC and LR may never both appear. Returns true
// (after emitting a diagnostic) on error, false if the list is acceptable.
bool ARMAsmParser::validatetLDMRegList(const MCInst &Inst,
                                       const OperandVector &Operands,
                                       unsigned ListNo, bool IsARPop) {
  const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
  bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";

  bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
  bool ListContainsLR = listContainsReg(Inst, ListNo, ARM::LR);
  bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);

  // When a '!' writeback token was parsed at Operands[ListNo], the actual
  // register list sits one parsed operand later; HasWritebackToken (0/1)
  // compensates for that when locating the diagnostic position.
  if (!IsARPop && ListContainsSP)
    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
                 "SP may not be in the register list");
  else if (ListContainsPC && ListContainsLR)
    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
                 "PC and LR may not be in the register list simultaneously");
  return false;
}

// Diagnose illegal register-list contents for an STM-style instruction whose
// list starts at parsed operand 'ListNo': neither SP nor PC may appear.
// Returns true (after emitting a diagnostic) on error, false otherwise.
bool ARMAsmParser::validatetSTMRegList(const MCInst &Inst,
                                       const OperandVector &Operands,
                                       unsigned ListNo) {
  const ARMOperand &Op = static_cast<const ARMOperand &>(*Operands[ListNo]);
  bool HasWritebackToken = Op.isToken() && Op.getToken() == "!";

  bool ListContainsSP = listContainsReg(Inst, ListNo, ARM::SP);
  bool ListContainsPC = listContainsReg(Inst, ListNo, ARM::PC);

  if (ListContainsSP && ListContainsPC)
    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
                 "SP and PC may not be in the register list");
  else if (ListContainsSP)
    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
                 "SP may not be in the register list");
  else if (ListContainsPC)
    return Error(Operands[ListNo + HasWritebackToken]->getStartLoc(),
                 "PC may not be in the register list");
  return false;
}

// Enforce the architectural register constraints shared by LDRD/STRD
// variants. 'Load' selects LDRD vs STRD diagnostics, 'ARMMode' selects the
// A32 constraints (Rt != r14, Rt even, Rt2 == Rt+1), and 'Writeback'
// indicates a pre/post-indexed form (which shifts Rt's operand index for
// stores and enables the Rn-overlap checks). Returns true on error.
bool ARMAsmParser::validateLDRDSTRD(MCInst &Inst,
                                    const OperandVector &Operands,
                                    bool Load, bool ARMMode, bool Writeback) {
  // For writeback stores, operand 0 is the writeback base; Rt follows it.
  unsigned RtIndex = Load || !Writeback ? 0 : 1;
  unsigned Rt = MRI->getEncodingValue(Inst.getOperand(RtIndex).getReg());
  unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(RtIndex + 1).getReg());

  if (ARMMode) {
    // Rt can't be R14.
    if (Rt == 14)
      return Error(Operands[3]->getStartLoc(),
                   "Rt can't be R14");

    // Rt must be even-numbered.
    if ((Rt & 1) == 1)
      return Error(Operands[3]->getStartLoc(),
                   "Rt must be even-numbered");

    // Rt2 must be Rt + 1.
    if (Rt2 != Rt + 1) {
      if (Load)
        return Error(Operands[3]->getStartLoc(),
                     "destination operands must be sequential");
      else
        return Error(Operands[3]->getStartLoc(),
                     "source operands must be sequential");
    }

    // FIXME: Diagnose m == 15
    // FIXME: Diagnose ldrd with m == t || m == t2.
  }

  // Thumb-mode LDRD: the two destination registers merely need to differ.
  if (!ARMMode && Load) {
    if (Rt2 == Rt)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands can't be identical");
  }

  if (Writeback) {
    unsigned Rn = MRI->getEncodingValue(Inst.getOperand(3).getReg());

    // Writeback base must not overlap the transfer registers.
    if (Rn == Rt || Rn == Rt2) {
      if (Load)
        return Error(Operands[3]->getStartLoc(),
                     "base register needs to be different from destination "
                     "registers");
      else
        return Error(Operands[3]->getStartLoc(),
                     "source register and base register can't be identical");
    }

    // FIXME: Diagnose ldrd/strd with writeback and n == 15.
    // (Except the immediate form of ldrd?)
  }

  return false;
}

// Return the index of the first vpred operand in MCID's operand list, or -1
// if the instruction has no vector-predicate operand.
static int findFirstVectorPredOperandIdx(const MCInstrDesc &MCID) {
  for (unsigned i = 0; i < MCID.NumOperands; ++i) {
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;
  }
  return -1;
}

// An instruction is vector-predicable iff it carries a vpred operand.
static bool isVectorPredicable(const MCInstrDesc &MCID) {
  return findFirstVectorPredOperandIdx(MCID) != -1;
}

// FIXME: We would really like to be able to tablegen'erate this.
bool ARMAsmParser::validateInstruction(MCInst &Inst,
                                       const OperandVector &Operands) {
  const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();

  // Check the IT block state first.
  // NOTE: BKPT and HLT instructions have the interesting property of being
  // allowed in IT blocks, but not being predicable. They just always execute.
  if (inITBlock() && !instIsBreakpoint(Inst)) {
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    ARMCC::CondCodes Cond = ARMCC::CondCodes(
        Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm());
    if (Cond != currentITCond()) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned I = 1; I < Operands.size(); ++I)
        if (static_cast<ARMOperand &>(*Operands[I]).isCondCode())
          CondLoc = Operands[I]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                                StringRef(ARMCondCodeToString(Cond)) +
                                "', but expected '" +
                                ARMCondCodeToString(currentITCond()) + "'");
    }
    // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
                 ARMCC::AL &&
             Inst.getOpcode() != ARM::tBcc &&
             Inst.getOpcode() != ARM::t2Bcc &&
             Inst.getOpcode() != ARM::t2BFic) {
    return Error(Loc, "predicated instructions must be in IT block");
  } else if (!isThumb() && !useImplicitITARM() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
                 ARMCC::AL) {
    // In ARM mode this is only a warning (conditional execution is legal
    // without IT), unless implicit-IT mode says to accept it silently.
    return Warning(Loc, "predicated instructions should be in IT block");
  } else if (!MCID.isPredicable()) {
    // Check the instruction doesn't have a predicate operand anyway
    // that it's not allowed to use. Sometimes this happens in order
    // to keep instructions the same shape even though one cannot
    // legally be predicated, e.g. vmul.f16 vs vmul.f32.
    for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
      if (MCID.OpInfo[i].isPredicate()) {
        if (Inst.getOperand(i).getImm() != ARMCC::AL)
          return Error(Loc, "instruction is not predicable");
        break;
      }
    }
  }

  // PC-setting instructions in an IT block, but not the last instruction of
  // the block, are UNPREDICTABLE.
  if (inExplicitITBlock() && !lastInITBlock() && isITBlockTerminator(Inst)) {
    return Error(Loc, "instruction must be outside of IT block or the last instruction in an IT block");
  }

  if (inVPTBlock() && !instIsBreakpoint(Inst)) {
    // Inside a VPT block the instruction's T/E predication must match the
    // corresponding bit of the VPT mask.
    unsigned Bit = extractITMaskBit(VPTState.Mask, VPTState.CurPosition);
    if (!isVectorPredicable(MCID))
      return Error(Loc, "instruction in VPT block must be predicable");
    unsigned Pred = Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm();
    unsigned VPTPred = Bit ? ARMVCC::Else : ARMVCC::Then;
    if (Pred != VPTPred) {
      SMLoc PredLoc;
      for (unsigned I = 1; I < Operands.size(); ++I)
        if (static_cast<ARMOperand &>(*Operands[I]).isVPTPred())
          PredLoc = Operands[I]->getStartLoc();
      return Error(PredLoc, "incorrect predication in VPT block; got '" +
                   StringRef(ARMVPTPredToString(ARMVCC::VPTCodes(Pred))) +
                   "', but expected '" +
                   ARMVPTPredToString(ARMVCC::VPTCodes(VPTPred)) + "'");
    }
  }
  else if (isVectorPredicable(MCID) &&
           Inst.getOperand(findFirstVectorPredOperandIdx(MCID)).getImm() !=
               ARMVCC::None)
    return Error(Loc, "VPT predicated instructions must be in VPT block");

  const unsigned Opcode = Inst.getOpcode();
  switch (Opcode) {
  case ARM::t2IT: {
    // Encoding is unpredictable if it ever results in a notional 'NV'
    // predicate. Since we don't parse 'NV' directly this means an 'AL'
    // predicate with an "else" mask bit.
    unsigned Cond = Inst.getOperand(0).getImm();
    unsigned Mask = Inst.getOperand(1).getImm();

    // Conditions only allowing a 't' are those with no set bit except
    // the lowest-order one that indicates the end of the sequence. In
    // other words, powers of 2.
    if (Cond == ARMCC::AL && countPopulation(Mask) != 1)
      return Error(Loc, "unpredictable IT predicate sequence");
    break;
  }
  case ARM::LDRD:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
                         /*Writeback*/false))
      return true;
    break;
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/true,
                         /*Writeback*/true))
      return true;
    break;
  case ARM::t2LDRDi8:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
                         /*Writeback*/false))
      return true;
    break;
  case ARM::t2LDRD_PRE:
  case ARM::t2LDRD_POST:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/true, /*ARMMode*/false,
                         /*Writeback*/true))
      return true;
    break;
  case ARM::t2BXJ: {
    const unsigned RmReg = Inst.getOperand(0).getReg();
    // Rm = SP is no longer unpredictable in v8-A
    if (RmReg == ARM::SP && !hasV8Ops())
      return Error(Operands[2]->getStartLoc(),
                   "r13 (SP) is an unpredictable operand to BXJ");
    return false;
  }
  case ARM::STRD:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
                         /*Writeback*/false))
      return true;
    break;
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/true,
                         /*Writeback*/true))
      return true;
    break;
  case ARM::t2STRD_PRE:
  case ARM::t2STRD_POST:
    if (validateLDRDSTRD(Inst, Operands, /*Load*/false, /*ARMMode*/false,
                         /*Writeback*/true))
      return true;
    break;
  // Pre/post-indexed stores: the transfer register may not equal the
  // (written-back) base register.
  case ARM::STR_PRE_IMM:
  case ARM::STR_PRE_REG:
  case ARM::t2STR_PRE:
  case ARM::STR_POST_IMM:
  case ARM::STR_POST_REG:
  case ARM::t2STR_POST:
  case ARM::STRH_PRE:
  case ARM::t2STRH_PRE:
  case ARM::STRH_POST:
  case ARM::t2STRH_POST:
  case ARM::STRB_PRE_IMM:
  case ARM::STRB_PRE_REG:
  case ARM::t2STRB_PRE:
  case ARM::STRB_POST_IMM:
  case ARM::STRB_POST_REG:
  case ARM::t2STRB_POST: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "source register and base register can't be identical");
    return false;
  }
  // Pre/post-indexed loads: the destination register may not equal the
  // (written-back) base register.
  case ARM::LDR_PRE_IMM:
  case ARM::LDR_PRE_REG:
  case ARM::t2LDR_PRE:
  case ARM::LDR_POST_IMM:
  case ARM::LDR_POST_REG:
  case ARM::t2LDR_POST:
  case ARM::LDRH_PRE:
  case ARM::t2LDRH_PRE:
  case ARM::LDRH_POST:
  case ARM::t2LDRH_POST:
  case ARM::LDRSH_PRE:
  case ARM::t2LDRSH_PRE:
  case ARM::LDRSH_POST:
  case ARM::t2LDRSH_POST:
  case ARM::LDRB_PRE_IMM:
  case ARM::LDRB_PRE_REG:
  case ARM::t2LDRB_PRE:
  case ARM::LDRB_POST_IMM:
  case ARM::LDRB_POST_REG:
  case ARM::t2LDRB_POST:
  case ARM::LDRSB_PRE:
  case ARM::t2LDRSB_PRE:
  case ARM::LDRSB_POST:
  case ARM::t2LDRSB_POST: {
    // Rt must be different from Rn.
    const unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    const unsigned Rn = MRI->getEncodingValue(Inst.getOperand(2).getReg());

    if (Rt == Rn)
      return Error(Operands[3]->getStartLoc(),
                   "destination register and base register can't be identical");
    return false;
  }
  case ARM::SBFX:
  case ARM::t2SBFX:
  case ARM::UBFX:
  case ARM::t2UBFX: {
    // Width must be in range [1, 32-lsb].
    unsigned LSB = Inst.getOperand(2).getImm();
    unsigned Widthm1 = Inst.getOperand(3).getImm();
    if (Widthm1 >= 32 - LSB)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  // Notionally handles ARM::tLDMIA_UPD too.
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM instruction.
    // We'll make the transformation in processInstruction() if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool HasWritebackToken =
        (static_cast<ARMOperand &>(*Operands[3]).isToken() &&
         static_cast<ARMOperand &>(*Operands[3]).getToken() == "!");
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, ListContainsBase) && !isThumbTwo())
      return Error(Operands[3 + HasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!ListContainsBase && !HasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (ListContainsBase && HasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  }
  case ARM::LDMIA_UPD:
  case ARM::LDMDB_UPD:
  case ARM::LDMIB_UPD:
  case ARM::LDMDA_UPD:
    // ARM variants loading and updating the same register are only officially
    // UNPREDICTABLE on v7 upwards. Goodness knows what they did before.
    if (!hasV7Ops())
      break;
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");
    break;
  case ARM::t2LDMIA:
  case ARM::t2LDMDB:
    if (validatetLDMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2STMIA:
  case ARM::t2STMDB:
    if (validatetSTMRegList(Inst, Operands, 3))
      return true;
    break;
  case ARM::t2LDMIA_UPD:
  case ARM::t2LDMDB_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD:
    // Writeback forms: the updated base may not also be transferred.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands.back()->getStartLoc(),
                   "writeback register not allowed in register list");

    if (Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
      if (validatetLDMRegList(Inst, Operands, 3))
        return true;
    } else {
      if (validatetSTMRegList(Inst, Operands, 3))
        return true;
    }
    break;

  case ARM::sysLDMIA_UPD:
  case ARM::sysLDMDA_UPD:
  case ARM::sysLDMDB_UPD:
  case ARM::sysLDMIB_UPD:
    // System (user-registers) LDM with writeback is only valid when PC is
    // loaded, i.e. the exception-return form.
    if (!listContainsReg(Inst, 3, ARM::PC))
      return Error(Operands[4]->getStartLoc(),
                   "writeback register only allowed on system LDM "
                   "if PC in register-list");
    break;
  case ARM::sysSTMIA_UPD:
  case ARM::sysSTMDA_UPD:
  case ARM::sysSTMDB_UPD:
  case ARM::sysSTMIB_UPD:
    return Error(Operands[2]->getStartLoc(),
                 "system STM cannot have writeback register");
  case ARM::tMUL:
    // The second source operand must be the same register as the destination
    // operand.
    //
    // In this case, we must directly check the parsed operands because the
    // cvtThumbMultiply() function is written in such a way that it guarantees
    // this first statement is always true for the new Inst. Essentially, the
    // destination is unconditionally copied into the second source operand
    // without checking to see if it matches what we actually parsed.
    if (Operands.size() == 6 && (((ARMOperand &)*Operands[3]).getReg() !=
                                 ((ARMOperand &)*Operands[5]).getReg()) &&
        (((ARMOperand &)*Operands[3]).getReg() !=
         ((ARMOperand &)*Operands[4]).getReg())) {
      return Error(Operands[3]->getStartLoc(),
                   "destination register must match source register");
    }
    break;

  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    if (validatetLDMRegList(Inst, Operands, 2, !isMClass()))
      return true;
    break;
  }
  case ARM::tPUSH: {
    bool ListContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, ListContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    if (validatetSTMRegList(Inst, Operands, 2))
      return true;
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool ListContainsBase, InvalidLowList;
    InvalidLowList = checkLowRegisterList(Inst, 4, Inst.getOperand(0).getReg(),
                                          0, ListContainsBase);
    if (InvalidLowList && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");

    // This would be converted to a 32-bit stm, but that's not valid if the
    // writeback register is in the list.
    if (InvalidLowList && ListContainsBase)
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    if (validatetSTMRegList(Inst, Operands, 4))
      return true;
    break;
  }
  case ARM::tADDrSP:
    // If the non-SP source operand and the destination operand are not the
    // same, we need thumb2 (for the wide encoding), or we have an error.
    if (!isThumbTwo() &&
        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
      return Error(Operands[4]->getStartLoc(),
                   "source register must be the same as destination");
    }
    break;

  case ARM::t2ADDri:
  case ARM::t2ADDri12:
  case ARM::t2ADDrr:
  case ARM::t2ADDrs:
  case ARM::t2SUBri:
  case ARM::t2SUBri12:
  case ARM::t2SUBrr:
  case ARM::t2SUBrs:
    if (Inst.getOperand(0).getReg() == ARM::SP &&
        Inst.getOperand(1).getReg() != ARM::SP)
      return Error(Operands[4]->getStartLoc(),
                   "source register must be sp if destination is sp");
    break;

  // Final range checking for Thumb unconditional branch instructions.
  case ARM::tB:
    if (!(static_cast<ARMOperand &>(*Operands[2])).isSignedOffset<11, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2B: {
    // Operand 2 may be a condition code token; the target then sits at 3.
    int op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[op]).isSignedOffset<24, 1>())
      return Error(Operands[op]->getStartLoc(), "branch target out of range");
    break;
  }
  // Final range checking for Thumb conditional branch instructions.
  case ARM::tBcc:
    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<8, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  case ARM::t2Bcc: {
    int Op = (Operands[2]->isImm()) ? 2 : 3;
    if (!static_cast<ARMOperand &>(*Operands[Op]).isSignedOffset<20, 1>())
      return Error(Operands[Op]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::tCBZ:
  case ARM::tCBNZ: {
    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<6, 1>())
      return Error(Operands[2]->getStartLoc(), "branch target out of range");
    break;
  }
  case ARM::MOVi16:
  case ARM::MOVTi16:
  case ARM::t2MOVi16:
  case ARM::t2MOVTi16:
    {
    // We want to avoid misleadingly allowing something like "mov r0, <symbol>"
    // especially when we turn it into a movw and the expression <symbol> does
    // not have a :lower16: or :upper16 as part of the expression. We don't
    // want the behavior of silently truncating, which can be unexpected and
    // lead to bugs that are difficult to find since this is an easy mistake
    // to make.
    int i = (Operands[3]->isImm()) ? 3 : 4;
    ARMOperand &Op = static_cast<ARMOperand &>(*Operands[i]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
    if (CE) break;
    const MCExpr *E = dyn_cast<MCExpr>(Op.getImm());
    if (!E) break;
    const ARMMCExpr *ARM16Expr = dyn_cast<ARMMCExpr>(E);
    if (!ARM16Expr || (ARM16Expr->getKind() != ARMMCExpr::VK_ARM_HI16 &&
                       ARM16Expr->getKind() != ARMMCExpr::VK_ARM_LO16))
      return Error(
          Op.getStartLoc(),
          "immediate expression for mov requires :lower16: or :upper16");
    break;
  }
  case ARM::HINT:
  case ARM::t2HINT: {
    unsigned Imm8 = Inst.getOperand(0).getImm();
    unsigned Pred = Inst.getOperand(1).getImm();
    // ESB is not predicable (pred must be AL). Without the RAS extension, this
    // behaves as any other unallocated hint.
    if (Imm8 == 0x10 && Pred != ARMCC::AL && hasRAS())
      return Error(Operands[1]->getStartLoc(), "instruction 'esb' is not "
                                               "predicable, but condition "
                                               "code specified");
    if (Imm8 == 0x14 && Pred != ARMCC::AL)
      return Error(Operands[1]->getStartLoc(), "instruction 'csdb' is not "
                                               "predicable, but condition "
                                               "code specified");
    break;
  }
  case ARM::t2WLS: {
    int idx = Opcode == ARM::t2WLS ? 3 : 4;
    if (!static_cast<ARMOperand &>(*Operands[idx]).isUnsignedOffset<11, 1>())
      return Error(Operands[idx]->getStartLoc(),
                   "loop end is out of range or not a positive multiple of 2");
    break;
  }
  case ARM::t2LEUpdate: {
    // LE's offset is backwards: it must be an even value in [-4094, -2].
    if (Inst.getOperand(2).isImm() &&
        !(Inst.getOperand(2).getImm() < 0 &&
          Inst.getOperand(2).getImm() >= -4094 &&
          (Inst.getOperand(2).getImm() & 1) == 0))
      return Error(Operands[2]->getStartLoc(),
                   "loop start is out of range or not a negative multiple of 2");
    break;
  }
  case ARM::t2BFi:
  case ARM::t2BFr:
  case ARM::t2BFLi:
  case ARM::t2BFLr: {
    if (!static_cast<ARMOperand &>(*Operands[2]).isUnsignedOffset<4, 1>() ||
        (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
      return Error(Operands[2]->getStartLoc(),
                   "branch location out of range or not a multiple of 2");

    if (Opcode == ARM::t2BFi) {
      if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<16, 1>())
        return Error(Operands[3]->getStartLoc(),
                     "branch target out of range or not a multiple of 2");
    } else if (Opcode == ARM::t2BFLi) {
      if (!static_cast<ARMOperand &>(*Operands[3]).isSignedOffset<18, 1>())
        return Error(Operands[3]->getStartLoc(),
                     "branch target out of range or not a multiple of 2");
    }
    break;
  }
  case ARM::t2BFic: {
    if (!static_cast<ARMOperand &>(*Operands[1]).isUnsignedOffset<4, 1>() ||
        (Inst.getOperand(0).isImm() && Inst.getOperand(0).getImm() == 0))
      return Error(Operands[1]->getStartLoc(),
                   "branch location out of range or not a multiple of 2");

    if (!static_cast<ARMOperand &>(*Operands[2]).isSignedOffset<16, 1>())
      return Error(Operands[2]->getStartLoc(),
                   "branch target out of range or not a multiple of 2");

    assert(Inst.getOperand(0).isImm() == Inst.getOperand(2).isImm() &&
           "branch location and else branch target should either both be "
           "immediates or both labels");

    if (Inst.getOperand(0).isImm() && Inst.getOperand(2).isImm()) {
      int Diff = Inst.getOperand(2).getImm() - Inst.getOperand(0).getImm();
      if (Diff != 4 && Diff != 2)
        return Error(
            Operands[3]->getStartLoc(),
            "else branch target must be 2 or 4 greater than the branch location");
    }
    break;
  }
  case ARM::t2CLRM: {
    for (unsigned i = 2; i < Inst.getNumOperands(); i++) {
      if (Inst.getOperand(i).isReg() &&
          !ARMMCRegisterClasses[ARM::GPRwithAPSRnospRegClassID].contains(
              Inst.getOperand(i).getReg())) {
        return Error(Operands[2]->getStartLoc(),
                     "invalid register in register list. Valid registers are "
                     "r0-r12, lr/r14 and APSR.");
      }
    }
    break;
  }
  case ARM::DSB:
  case ARM::t2DSB: {

    if (Inst.getNumOperands() < 2)
      break;

    unsigned Option = Inst.getOperand(0).getImm();
    unsigned Pred = Inst.getOperand(1).getImm();

    // SSBB and PSSBB (DSB #0|#4) are not predicable (pred must be AL).
    if (Option == 0 && Pred != ARMCC::AL)
      return Error(Operands[1]->getStartLoc(),
                   "instruction 'ssbb' is not predicable, but condition code "
                   "specified");
    if (Option == 4 && Pred != ARMCC::AL)
      return Error(Operands[1]->getStartLoc(),
                   "instruction 'pssbb' is not predicable, but condition code "
                   "specified");
    break;
  }
  case ARM::VMOVRRS: {
    // Source registers must be sequential.
    const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(2).getReg());
    const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(3).getReg());
    if (Sm1 != Sm + 1)
      return Error(Operands[5]->getStartLoc(),
                   "source operands must be sequential");
    break;
  }
  case ARM::VMOVSRR: {
    // Destination registers must be sequential.
    const unsigned Sm = MRI->getEncodingValue(Inst.getOperand(0).getReg());
    const unsigned Sm1 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
    if (Sm1 != Sm + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    break;
  }
  case ARM::VLDMDIA:
  case ARM::VSTMDIA: {
    // D-register list length is encoded in 8 bits of pairs, max 16 regs.
    ARMOperand &Op = static_cast<ARMOperand&>(*Operands[3]);
    auto &RegList = Op.getRegList();
    if (RegList.size() < 1 || RegList.size() > 16)
      return Error(Operands[3]->getStartLoc(),
                   "list of registers must be at least 1 and at most 16");
    break;
  }
  }

  return false;
}

// Map a VST pseudo ("Asm") opcode to the real MC opcode, and report the
// register spacing of its list through 'Spacing' (1 = consecutive
// D registers, 2 = every-other, i.e. the Q-register forms).
static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8: Spacing = 1; return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8: Spacing = 1; return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8: Spacing = 1; return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8: Spacing = 1; return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;

  // VST3LN
  case ARM::VST3LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  // NOTE(review): q-form below sets Spacing = 1 while its _register and
  // plain-Asm q16 siblings use 2 — looks inconsistent; confirm intended.
  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdWB_register_Asm_8: Spacing = 1; return ARM::VST3LNd8_UPD;
  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
  case ARM::VST3LNdAsm_8: Spacing = 1; return ARM::VST3LNd8;
  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;

  // VST3
  case ARM::VST3dWB_fixed_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_fixed_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dWB_register_Asm_8: Spacing = 1; return ARM::VST3d8_UPD;
  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
  case ARM::VST3qWB_register_Asm_8: Spacing = 2; return ARM::VST3q8_UPD;
  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
  case ARM::VST3dAsm_8: Spacing = 1; return ARM::VST3d8;
  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
  case ARM::VST3qAsm_8: Spacing = 2; return ARM::VST3q8;
  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;

  // VST4LN
  case ARM::VST4LNdWB_fixed_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  // NOTE(review): same Spacing = 1 vs 2 inconsistency as VST3LN above.
  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdWB_register_Asm_8: Spacing = 1; return ARM::VST4LNd8_UPD;
  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
  case ARM::VST4LNdAsm_8: Spacing = 1; return ARM::VST4LNd8;
  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;

  // VST4
  case ARM::VST4dWB_fixed_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_fixed_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dWB_register_Asm_8: Spacing = 1; return ARM::VST4d8_UPD;
  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
  case ARM::VST4qWB_register_Asm_8: Spacing = 2; return ARM::VST4q8_UPD;
  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
  case ARM::VST4dAsm_8: Spacing = 1; return ARM::VST4d8;
  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
  case ARM::VST4qAsm_8: Spacing = 2; return ARM::VST4q8;
  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
  }
}

static unsigned
// Map a VLD "complex alias" pseudo-opcode (the *_Asm_* forms produced by the
// assembly matcher) onto the real VLDnLN / VLDnDUP / VLDn instruction opcode.
// Spacing is set to the register-list stride used when expanding the operand
// list in processInstruction(): 1 for consecutive D registers, 2 for
// every-other-D-register (the "q" / spaced variants).
getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: llvm_unreachable("unexpected opcode!");
  // VLD1LN
  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;

  // VLD2LN
  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  // NOTE(review): Spacing stays 1 here although this is a q-register form,
  // unlike the other VLD2LNq cases below — matches upstream; confirm intended.
  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;

  // VLD3DUP
  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  // NOTE(review): q-form with Spacing = 1 — matches upstream; confirm intended.
  case ARM::VLD3DUPqWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
  case ARM::VLD3DUPqWB_register_Asm_8:  Spacing = 2; return ARM::VLD3DUPq8_UPD;
  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
  case ARM::VLD3DUPqAsm_8:  Spacing = 2; return ARM::VLD3DUPq8;
  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;

  // VLD3LN
  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  // NOTE(review): q-form with Spacing = 1 — matches upstream; confirm intended.
  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;

  // VLD3
  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;

  // VLD4LN
  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;

  // VLD4DUP
  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  // NOTE(review): two q-forms below keep Spacing = 1, unlike the _32 variant —
  // matches upstream; confirm intended.
  case ARM::VLD4DUPqWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
  case ARM::VLD4DUPqWB_register_Asm_8:  Spacing = 2; return ARM::VLD4DUPq8_UPD;
  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
  case ARM::VLD4DUPqAsm_8:  Spacing = 2; return ARM::VLD4DUPq8;
  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;

  // VLD4
  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
  }
}

bool ARMAsmParser::processInstruction(MCInst &Inst,
                                      const OperandVector &Operands,
                                      MCStreamer &Out) {
  // Check if we have the wide qualifier, because if it's present we
  // must avoid selecting a 16-bit thumb instruction.
7766 bool HasWideQualifier = false; 7767 for (auto &Op : Operands) { 7768 ARMOperand &ARMOp = static_cast<ARMOperand&>(*Op); 7769 if (ARMOp.isToken() && ARMOp.getToken() == ".w") { 7770 HasWideQualifier = true; 7771 break; 7772 } 7773 } 7774 7775 switch (Inst.getOpcode()) { 7776 case ARM::MVE_VORNIZ0v4i32: 7777 case ARM::MVE_VORNIZ0v8i16: 7778 case ARM::MVE_VORNIZ8v4i32: 7779 case ARM::MVE_VORNIZ8v8i16: 7780 case ARM::MVE_VORNIZ16v4i32: 7781 case ARM::MVE_VORNIZ24v4i32: 7782 case ARM::MVE_VANDIZ0v4i32: 7783 case ARM::MVE_VANDIZ0v8i16: 7784 case ARM::MVE_VANDIZ8v4i32: 7785 case ARM::MVE_VANDIZ8v8i16: 7786 case ARM::MVE_VANDIZ16v4i32: 7787 case ARM::MVE_VANDIZ24v4i32: { 7788 unsigned Opcode; 7789 bool imm16 = false; 7790 switch(Inst.getOpcode()) { 7791 case ARM::MVE_VORNIZ0v4i32: Opcode = ARM::MVE_VORRIZ0v4i32; break; 7792 case ARM::MVE_VORNIZ0v8i16: Opcode = ARM::MVE_VORRIZ0v8i16; imm16 = true; break; 7793 case ARM::MVE_VORNIZ8v4i32: Opcode = ARM::MVE_VORRIZ8v4i32; break; 7794 case ARM::MVE_VORNIZ8v8i16: Opcode = ARM::MVE_VORRIZ8v8i16; imm16 = true; break; 7795 case ARM::MVE_VORNIZ16v4i32: Opcode = ARM::MVE_VORRIZ16v4i32; break; 7796 case ARM::MVE_VORNIZ24v4i32: Opcode = ARM::MVE_VORRIZ24v4i32; break; 7797 case ARM::MVE_VANDIZ0v4i32: Opcode = ARM::MVE_VBICIZ0v4i32; break; 7798 case ARM::MVE_VANDIZ0v8i16: Opcode = ARM::MVE_VBICIZ0v8i16; imm16 = true; break; 7799 case ARM::MVE_VANDIZ8v4i32: Opcode = ARM::MVE_VBICIZ8v4i32; break; 7800 case ARM::MVE_VANDIZ8v8i16: Opcode = ARM::MVE_VBICIZ8v8i16; imm16 = true; break; 7801 case ARM::MVE_VANDIZ16v4i32: Opcode = ARM::MVE_VBICIZ16v4i32; break; 7802 case ARM::MVE_VANDIZ24v4i32: Opcode = ARM::MVE_VBICIZ24v4i32; break; 7803 default: llvm_unreachable("unexpected opcode"); 7804 } 7805 7806 MCInst TmpInst; 7807 TmpInst.setOpcode(Opcode); 7808 TmpInst.addOperand(Inst.getOperand(0)); 7809 TmpInst.addOperand(Inst.getOperand(1)); 7810 7811 // invert immediate 7812 unsigned imm = ~Inst.getOperand(2).getImm() & (imm16 ? 
0xffff : 0xffffffff); 7813 TmpInst.addOperand(MCOperand::createImm(imm)); 7814 7815 TmpInst.addOperand(Inst.getOperand(3)); 7816 TmpInst.addOperand(Inst.getOperand(4)); 7817 Inst = TmpInst; 7818 return true; 7819 } 7820 // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction. 7821 case ARM::LDRT_POST: 7822 case ARM::LDRBT_POST: { 7823 const unsigned Opcode = 7824 (Inst.getOpcode() == ARM::LDRT_POST) ? ARM::LDRT_POST_IMM 7825 : ARM::LDRBT_POST_IMM; 7826 MCInst TmpInst; 7827 TmpInst.setOpcode(Opcode); 7828 TmpInst.addOperand(Inst.getOperand(0)); 7829 TmpInst.addOperand(Inst.getOperand(1)); 7830 TmpInst.addOperand(Inst.getOperand(1)); 7831 TmpInst.addOperand(MCOperand::createReg(0)); 7832 TmpInst.addOperand(MCOperand::createImm(0)); 7833 TmpInst.addOperand(Inst.getOperand(2)); 7834 TmpInst.addOperand(Inst.getOperand(3)); 7835 Inst = TmpInst; 7836 return true; 7837 } 7838 // Alias for alternate form of 'str{,b}t Rt, [Rn], #imm' instruction. 7839 case ARM::STRT_POST: 7840 case ARM::STRBT_POST: { 7841 const unsigned Opcode = 7842 (Inst.getOpcode() == ARM::STRT_POST) ? ARM::STRT_POST_IMM 7843 : ARM::STRBT_POST_IMM; 7844 MCInst TmpInst; 7845 TmpInst.setOpcode(Opcode); 7846 TmpInst.addOperand(Inst.getOperand(1)); 7847 TmpInst.addOperand(Inst.getOperand(0)); 7848 TmpInst.addOperand(Inst.getOperand(1)); 7849 TmpInst.addOperand(MCOperand::createReg(0)); 7850 TmpInst.addOperand(MCOperand::createImm(0)); 7851 TmpInst.addOperand(Inst.getOperand(2)); 7852 TmpInst.addOperand(Inst.getOperand(3)); 7853 Inst = TmpInst; 7854 return true; 7855 } 7856 // Alias for alternate form of 'ADR Rd, #imm' instruction. 
7857 case ARM::ADDri: { 7858 if (Inst.getOperand(1).getReg() != ARM::PC || 7859 Inst.getOperand(5).getReg() != 0 || 7860 !(Inst.getOperand(2).isExpr() || Inst.getOperand(2).isImm())) 7861 return false; 7862 MCInst TmpInst; 7863 TmpInst.setOpcode(ARM::ADR); 7864 TmpInst.addOperand(Inst.getOperand(0)); 7865 if (Inst.getOperand(2).isImm()) { 7866 // Immediate (mod_imm) will be in its encoded form, we must unencode it 7867 // before passing it to the ADR instruction. 7868 unsigned Enc = Inst.getOperand(2).getImm(); 7869 TmpInst.addOperand(MCOperand::createImm( 7870 ARM_AM::rotr32(Enc & 0xFF, (Enc & 0xF00) >> 7))); 7871 } else { 7872 // Turn PC-relative expression into absolute expression. 7873 // Reading PC provides the start of the current instruction + 8 and 7874 // the transform to adr is biased by that. 7875 MCSymbol *Dot = getContext().createTempSymbol(); 7876 Out.EmitLabel(Dot); 7877 const MCExpr *OpExpr = Inst.getOperand(2).getExpr(); 7878 const MCExpr *InstPC = MCSymbolRefExpr::create(Dot, 7879 MCSymbolRefExpr::VK_None, 7880 getContext()); 7881 const MCExpr *Const8 = MCConstantExpr::create(8, getContext()); 7882 const MCExpr *ReadPC = MCBinaryExpr::createAdd(InstPC, Const8, 7883 getContext()); 7884 const MCExpr *FixupAddr = MCBinaryExpr::createAdd(ReadPC, OpExpr, 7885 getContext()); 7886 TmpInst.addOperand(MCOperand::createExpr(FixupAddr)); 7887 } 7888 TmpInst.addOperand(Inst.getOperand(3)); 7889 TmpInst.addOperand(Inst.getOperand(4)); 7890 Inst = TmpInst; 7891 return true; 7892 } 7893 // Aliases for alternate PC+imm syntax of LDR instructions. 7894 case ARM::t2LDRpcrel: 7895 // Select the narrow version if the immediate will fit. 
7896 if (Inst.getOperand(1).getImm() > 0 && 7897 Inst.getOperand(1).getImm() <= 0xff && 7898 !HasWideQualifier) 7899 Inst.setOpcode(ARM::tLDRpci); 7900 else 7901 Inst.setOpcode(ARM::t2LDRpci); 7902 return true; 7903 case ARM::t2LDRBpcrel: 7904 Inst.setOpcode(ARM::t2LDRBpci); 7905 return true; 7906 case ARM::t2LDRHpcrel: 7907 Inst.setOpcode(ARM::t2LDRHpci); 7908 return true; 7909 case ARM::t2LDRSBpcrel: 7910 Inst.setOpcode(ARM::t2LDRSBpci); 7911 return true; 7912 case ARM::t2LDRSHpcrel: 7913 Inst.setOpcode(ARM::t2LDRSHpci); 7914 return true; 7915 case ARM::LDRConstPool: 7916 case ARM::tLDRConstPool: 7917 case ARM::t2LDRConstPool: { 7918 // Pseudo instruction ldr rt, =immediate is converted to a 7919 // MOV rt, immediate if immediate is known and representable 7920 // otherwise we create a constant pool entry that we load from. 7921 MCInst TmpInst; 7922 if (Inst.getOpcode() == ARM::LDRConstPool) 7923 TmpInst.setOpcode(ARM::LDRi12); 7924 else if (Inst.getOpcode() == ARM::tLDRConstPool) 7925 TmpInst.setOpcode(ARM::tLDRpci); 7926 else if (Inst.getOpcode() == ARM::t2LDRConstPool) 7927 TmpInst.setOpcode(ARM::t2LDRpci); 7928 const ARMOperand &PoolOperand = 7929 (HasWideQualifier ? 
7930 static_cast<ARMOperand &>(*Operands[4]) : 7931 static_cast<ARMOperand &>(*Operands[3])); 7932 const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm(); 7933 // If SubExprVal is a constant we may be able to use a MOV 7934 if (isa<MCConstantExpr>(SubExprVal) && 7935 Inst.getOperand(0).getReg() != ARM::PC && 7936 Inst.getOperand(0).getReg() != ARM::SP) { 7937 int64_t Value = 7938 (int64_t) (cast<MCConstantExpr>(SubExprVal))->getValue(); 7939 bool UseMov = true; 7940 bool MovHasS = true; 7941 if (Inst.getOpcode() == ARM::LDRConstPool) { 7942 // ARM Constant 7943 if (ARM_AM::getSOImmVal(Value) != -1) { 7944 Value = ARM_AM::getSOImmVal(Value); 7945 TmpInst.setOpcode(ARM::MOVi); 7946 } 7947 else if (ARM_AM::getSOImmVal(~Value) != -1) { 7948 Value = ARM_AM::getSOImmVal(~Value); 7949 TmpInst.setOpcode(ARM::MVNi); 7950 } 7951 else if (hasV6T2Ops() && 7952 Value >=0 && Value < 65536) { 7953 TmpInst.setOpcode(ARM::MOVi16); 7954 MovHasS = false; 7955 } 7956 else 7957 UseMov = false; 7958 } 7959 else { 7960 // Thumb/Thumb2 Constant 7961 if (hasThumb2() && 7962 ARM_AM::getT2SOImmVal(Value) != -1) 7963 TmpInst.setOpcode(ARM::t2MOVi); 7964 else if (hasThumb2() && 7965 ARM_AM::getT2SOImmVal(~Value) != -1) { 7966 TmpInst.setOpcode(ARM::t2MVNi); 7967 Value = ~Value; 7968 } 7969 else if (hasV8MBaseline() && 7970 Value >=0 && Value < 65536) { 7971 TmpInst.setOpcode(ARM::t2MOVi16); 7972 MovHasS = false; 7973 } 7974 else 7975 UseMov = false; 7976 } 7977 if (UseMov) { 7978 TmpInst.addOperand(Inst.getOperand(0)); // Rt 7979 TmpInst.addOperand(MCOperand::createImm(Value)); // Immediate 7980 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7981 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 7982 if (MovHasS) 7983 TmpInst.addOperand(MCOperand::createReg(0)); // S 7984 Inst = TmpInst; 7985 return true; 7986 } 7987 } 7988 // No opportunity to use MOV/MVN create constant pool 7989 const MCExpr *CPLoc = 7990 getTargetStreamer().addConstantPoolEntry(SubExprVal, 7991 
PoolOperand.getStartLoc()); 7992 TmpInst.addOperand(Inst.getOperand(0)); // Rt 7993 TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool 7994 if (TmpInst.getOpcode() == ARM::LDRi12) 7995 TmpInst.addOperand(MCOperand::createImm(0)); // unused offset 7996 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 7997 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 7998 Inst = TmpInst; 7999 return true; 8000 } 8001 // Handle NEON VST complex aliases. 8002 case ARM::VST1LNdWB_register_Asm_8: 8003 case ARM::VST1LNdWB_register_Asm_16: 8004 case ARM::VST1LNdWB_register_Asm_32: { 8005 MCInst TmpInst; 8006 // Shuffle the operands around so the lane index operand is in the 8007 // right place. 8008 unsigned Spacing; 8009 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8010 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8011 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8012 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8013 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8014 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8015 TmpInst.addOperand(Inst.getOperand(1)); // lane 8016 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8017 TmpInst.addOperand(Inst.getOperand(6)); 8018 Inst = TmpInst; 8019 return true; 8020 } 8021 8022 case ARM::VST2LNdWB_register_Asm_8: 8023 case ARM::VST2LNdWB_register_Asm_16: 8024 case ARM::VST2LNdWB_register_Asm_32: 8025 case ARM::VST2LNqWB_register_Asm_16: 8026 case ARM::VST2LNqWB_register_Asm_32: { 8027 MCInst TmpInst; 8028 // Shuffle the operands around so the lane index operand is in the 8029 // right place. 
8030 unsigned Spacing; 8031 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8032 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8033 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8034 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8035 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8036 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8037 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8038 Spacing)); 8039 TmpInst.addOperand(Inst.getOperand(1)); // lane 8040 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8041 TmpInst.addOperand(Inst.getOperand(6)); 8042 Inst = TmpInst; 8043 return true; 8044 } 8045 8046 case ARM::VST3LNdWB_register_Asm_8: 8047 case ARM::VST3LNdWB_register_Asm_16: 8048 case ARM::VST3LNdWB_register_Asm_32: 8049 case ARM::VST3LNqWB_register_Asm_16: 8050 case ARM::VST3LNqWB_register_Asm_32: { 8051 MCInst TmpInst; 8052 // Shuffle the operands around so the lane index operand is in the 8053 // right place. 8054 unsigned Spacing; 8055 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8056 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8057 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8058 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8059 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8060 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8061 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8062 Spacing)); 8063 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8064 Spacing * 2)); 8065 TmpInst.addOperand(Inst.getOperand(1)); // lane 8066 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8067 TmpInst.addOperand(Inst.getOperand(6)); 8068 Inst = TmpInst; 8069 return true; 8070 } 8071 8072 case ARM::VST4LNdWB_register_Asm_8: 8073 case ARM::VST4LNdWB_register_Asm_16: 8074 case ARM::VST4LNdWB_register_Asm_32: 8075 case ARM::VST4LNqWB_register_Asm_16: 8076 case ARM::VST4LNqWB_register_Asm_32: { 8077 MCInst TmpInst; 8078 // Shuffle the operands around so 
the lane index operand is in the 8079 // right place. 8080 unsigned Spacing; 8081 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8082 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8083 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8084 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8085 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8086 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8087 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8088 Spacing)); 8089 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8090 Spacing * 2)); 8091 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8092 Spacing * 3)); 8093 TmpInst.addOperand(Inst.getOperand(1)); // lane 8094 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8095 TmpInst.addOperand(Inst.getOperand(6)); 8096 Inst = TmpInst; 8097 return true; 8098 } 8099 8100 case ARM::VST1LNdWB_fixed_Asm_8: 8101 case ARM::VST1LNdWB_fixed_Asm_16: 8102 case ARM::VST1LNdWB_fixed_Asm_32: { 8103 MCInst TmpInst; 8104 // Shuffle the operands around so the lane index operand is in the 8105 // right place. 
8106 unsigned Spacing; 8107 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8108 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8109 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8110 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8111 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8112 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8113 TmpInst.addOperand(Inst.getOperand(1)); // lane 8114 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8115 TmpInst.addOperand(Inst.getOperand(5)); 8116 Inst = TmpInst; 8117 return true; 8118 } 8119 8120 case ARM::VST2LNdWB_fixed_Asm_8: 8121 case ARM::VST2LNdWB_fixed_Asm_16: 8122 case ARM::VST2LNdWB_fixed_Asm_32: 8123 case ARM::VST2LNqWB_fixed_Asm_16: 8124 case ARM::VST2LNqWB_fixed_Asm_32: { 8125 MCInst TmpInst; 8126 // Shuffle the operands around so the lane index operand is in the 8127 // right place. 8128 unsigned Spacing; 8129 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8130 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8131 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8132 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8133 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8134 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8135 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8136 Spacing)); 8137 TmpInst.addOperand(Inst.getOperand(1)); // lane 8138 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8139 TmpInst.addOperand(Inst.getOperand(5)); 8140 Inst = TmpInst; 8141 return true; 8142 } 8143 8144 case ARM::VST3LNdWB_fixed_Asm_8: 8145 case ARM::VST3LNdWB_fixed_Asm_16: 8146 case ARM::VST3LNdWB_fixed_Asm_32: 8147 case ARM::VST3LNqWB_fixed_Asm_16: 8148 case ARM::VST3LNqWB_fixed_Asm_32: { 8149 MCInst TmpInst; 8150 // Shuffle the operands around so the lane index operand is in the 8151 // right place. 
8152 unsigned Spacing; 8153 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8154 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8155 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8156 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8157 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8158 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8159 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8160 Spacing)); 8161 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8162 Spacing * 2)); 8163 TmpInst.addOperand(Inst.getOperand(1)); // lane 8164 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8165 TmpInst.addOperand(Inst.getOperand(5)); 8166 Inst = TmpInst; 8167 return true; 8168 } 8169 8170 case ARM::VST4LNdWB_fixed_Asm_8: 8171 case ARM::VST4LNdWB_fixed_Asm_16: 8172 case ARM::VST4LNdWB_fixed_Asm_32: 8173 case ARM::VST4LNqWB_fixed_Asm_16: 8174 case ARM::VST4LNqWB_fixed_Asm_32: { 8175 MCInst TmpInst; 8176 // Shuffle the operands around so the lane index operand is in the 8177 // right place. 
8178 unsigned Spacing; 8179 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8180 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8181 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8182 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8183 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8184 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8185 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8186 Spacing)); 8187 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8188 Spacing * 2)); 8189 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8190 Spacing * 3)); 8191 TmpInst.addOperand(Inst.getOperand(1)); // lane 8192 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8193 TmpInst.addOperand(Inst.getOperand(5)); 8194 Inst = TmpInst; 8195 return true; 8196 } 8197 8198 case ARM::VST1LNdAsm_8: 8199 case ARM::VST1LNdAsm_16: 8200 case ARM::VST1LNdAsm_32: { 8201 MCInst TmpInst; 8202 // Shuffle the operands around so the lane index operand is in the 8203 // right place. 8204 unsigned Spacing; 8205 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8206 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8207 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8208 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8209 TmpInst.addOperand(Inst.getOperand(1)); // lane 8210 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8211 TmpInst.addOperand(Inst.getOperand(5)); 8212 Inst = TmpInst; 8213 return true; 8214 } 8215 8216 case ARM::VST2LNdAsm_8: 8217 case ARM::VST2LNdAsm_16: 8218 case ARM::VST2LNdAsm_32: 8219 case ARM::VST2LNqAsm_16: 8220 case ARM::VST2LNqAsm_32: { 8221 MCInst TmpInst; 8222 // Shuffle the operands around so the lane index operand is in the 8223 // right place. 
8224 unsigned Spacing; 8225 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8226 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8227 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8228 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8229 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8230 Spacing)); 8231 TmpInst.addOperand(Inst.getOperand(1)); // lane 8232 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8233 TmpInst.addOperand(Inst.getOperand(5)); 8234 Inst = TmpInst; 8235 return true; 8236 } 8237 8238 case ARM::VST3LNdAsm_8: 8239 case ARM::VST3LNdAsm_16: 8240 case ARM::VST3LNdAsm_32: 8241 case ARM::VST3LNqAsm_16: 8242 case ARM::VST3LNqAsm_32: { 8243 MCInst TmpInst; 8244 // Shuffle the operands around so the lane index operand is in the 8245 // right place. 8246 unsigned Spacing; 8247 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8248 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8249 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8250 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8251 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8252 Spacing)); 8253 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8254 Spacing * 2)); 8255 TmpInst.addOperand(Inst.getOperand(1)); // lane 8256 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8257 TmpInst.addOperand(Inst.getOperand(5)); 8258 Inst = TmpInst; 8259 return true; 8260 } 8261 8262 case ARM::VST4LNdAsm_8: 8263 case ARM::VST4LNdAsm_16: 8264 case ARM::VST4LNdAsm_32: 8265 case ARM::VST4LNqAsm_16: 8266 case ARM::VST4LNqAsm_32: { 8267 MCInst TmpInst; 8268 // Shuffle the operands around so the lane index operand is in the 8269 // right place. 
8270 unsigned Spacing; 8271 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8272 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8273 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8274 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8275 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8276 Spacing)); 8277 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8278 Spacing * 2)); 8279 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8280 Spacing * 3)); 8281 TmpInst.addOperand(Inst.getOperand(1)); // lane 8282 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8283 TmpInst.addOperand(Inst.getOperand(5)); 8284 Inst = TmpInst; 8285 return true; 8286 } 8287 8288 // Handle NEON VLD complex aliases. 8289 case ARM::VLD1LNdWB_register_Asm_8: 8290 case ARM::VLD1LNdWB_register_Asm_16: 8291 case ARM::VLD1LNdWB_register_Asm_32: { 8292 MCInst TmpInst; 8293 // Shuffle the operands around so the lane index operand is in the 8294 // right place. 8295 unsigned Spacing; 8296 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8297 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8298 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8299 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8300 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8301 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8302 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8303 TmpInst.addOperand(Inst.getOperand(1)); // lane 8304 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8305 TmpInst.addOperand(Inst.getOperand(6)); 8306 Inst = TmpInst; 8307 return true; 8308 } 8309 8310 case ARM::VLD2LNdWB_register_Asm_8: 8311 case ARM::VLD2LNdWB_register_Asm_16: 8312 case ARM::VLD2LNdWB_register_Asm_32: 8313 case ARM::VLD2LNqWB_register_Asm_16: 8314 case ARM::VLD2LNqWB_register_Asm_32: { 8315 MCInst TmpInst; 8316 // Shuffle the operands around so the lane index operand is in the 8317 // right place. 
8318 unsigned Spacing; 8319 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8320 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8321 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8322 Spacing)); 8323 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8324 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8325 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8326 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8327 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8328 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8329 Spacing)); 8330 TmpInst.addOperand(Inst.getOperand(1)); // lane 8331 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8332 TmpInst.addOperand(Inst.getOperand(6)); 8333 Inst = TmpInst; 8334 return true; 8335 } 8336 8337 case ARM::VLD3LNdWB_register_Asm_8: 8338 case ARM::VLD3LNdWB_register_Asm_16: 8339 case ARM::VLD3LNdWB_register_Asm_32: 8340 case ARM::VLD3LNqWB_register_Asm_16: 8341 case ARM::VLD3LNqWB_register_Asm_32: { 8342 MCInst TmpInst; 8343 // Shuffle the operands around so the lane index operand is in the 8344 // right place. 
8345 unsigned Spacing; 8346 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8347 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8348 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8349 Spacing)); 8350 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8351 Spacing * 2)); 8352 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8353 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8354 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8355 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8356 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8357 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8358 Spacing)); 8359 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8360 Spacing * 2)); 8361 TmpInst.addOperand(Inst.getOperand(1)); // lane 8362 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8363 TmpInst.addOperand(Inst.getOperand(6)); 8364 Inst = TmpInst; 8365 return true; 8366 } 8367 8368 case ARM::VLD4LNdWB_register_Asm_8: 8369 case ARM::VLD4LNdWB_register_Asm_16: 8370 case ARM::VLD4LNdWB_register_Asm_32: 8371 case ARM::VLD4LNqWB_register_Asm_16: 8372 case ARM::VLD4LNqWB_register_Asm_32: { 8373 MCInst TmpInst; 8374 // Shuffle the operands around so the lane index operand is in the 8375 // right place. 
8376 unsigned Spacing; 8377 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8378 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8379 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8380 Spacing)); 8381 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8382 Spacing * 2)); 8383 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8384 Spacing * 3)); 8385 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8386 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8387 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8388 TmpInst.addOperand(Inst.getOperand(4)); // Rm 8389 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8390 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8391 Spacing)); 8392 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8393 Spacing * 2)); 8394 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8395 Spacing * 3)); 8396 TmpInst.addOperand(Inst.getOperand(1)); // lane 8397 TmpInst.addOperand(Inst.getOperand(5)); // CondCode 8398 TmpInst.addOperand(Inst.getOperand(6)); 8399 Inst = TmpInst; 8400 return true; 8401 } 8402 8403 case ARM::VLD1LNdWB_fixed_Asm_8: 8404 case ARM::VLD1LNdWB_fixed_Asm_16: 8405 case ARM::VLD1LNdWB_fixed_Asm_32: { 8406 MCInst TmpInst; 8407 // Shuffle the operands around so the lane index operand is in the 8408 // right place. 
8409 unsigned Spacing; 8410 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8411 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8412 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8413 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8414 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8415 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8416 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8417 TmpInst.addOperand(Inst.getOperand(1)); // lane 8418 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8419 TmpInst.addOperand(Inst.getOperand(5)); 8420 Inst = TmpInst; 8421 return true; 8422 } 8423 8424 case ARM::VLD2LNdWB_fixed_Asm_8: 8425 case ARM::VLD2LNdWB_fixed_Asm_16: 8426 case ARM::VLD2LNdWB_fixed_Asm_32: 8427 case ARM::VLD2LNqWB_fixed_Asm_16: 8428 case ARM::VLD2LNqWB_fixed_Asm_32: { 8429 MCInst TmpInst; 8430 // Shuffle the operands around so the lane index operand is in the 8431 // right place. 8432 unsigned Spacing; 8433 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8434 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8435 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8436 Spacing)); 8437 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8438 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8439 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8440 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8441 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8442 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8443 Spacing)); 8444 TmpInst.addOperand(Inst.getOperand(1)); // lane 8445 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8446 TmpInst.addOperand(Inst.getOperand(5)); 8447 Inst = TmpInst; 8448 return true; 8449 } 8450 8451 case ARM::VLD3LNdWB_fixed_Asm_8: 8452 case ARM::VLD3LNdWB_fixed_Asm_16: 8453 case ARM::VLD3LNdWB_fixed_Asm_32: 8454 case ARM::VLD3LNqWB_fixed_Asm_16: 8455 case ARM::VLD3LNqWB_fixed_Asm_32: { 8456 MCInst TmpInst; 8457 // 
Shuffle the operands around so the lane index operand is in the 8458 // right place. 8459 unsigned Spacing; 8460 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8461 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8462 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8463 Spacing)); 8464 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8465 Spacing * 2)); 8466 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8467 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8468 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8469 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8470 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8471 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8472 Spacing)); 8473 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8474 Spacing * 2)); 8475 TmpInst.addOperand(Inst.getOperand(1)); // lane 8476 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8477 TmpInst.addOperand(Inst.getOperand(5)); 8478 Inst = TmpInst; 8479 return true; 8480 } 8481 8482 case ARM::VLD4LNdWB_fixed_Asm_8: 8483 case ARM::VLD4LNdWB_fixed_Asm_16: 8484 case ARM::VLD4LNdWB_fixed_Asm_32: 8485 case ARM::VLD4LNqWB_fixed_Asm_16: 8486 case ARM::VLD4LNqWB_fixed_Asm_32: { 8487 MCInst TmpInst; 8488 // Shuffle the operands around so the lane index operand is in the 8489 // right place. 
8490 unsigned Spacing; 8491 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8492 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8493 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8494 Spacing)); 8495 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8496 Spacing * 2)); 8497 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8498 Spacing * 3)); 8499 TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb 8500 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8501 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8502 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8503 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8504 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8505 Spacing)); 8506 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8507 Spacing * 2)); 8508 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8509 Spacing * 3)); 8510 TmpInst.addOperand(Inst.getOperand(1)); // lane 8511 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8512 TmpInst.addOperand(Inst.getOperand(5)); 8513 Inst = TmpInst; 8514 return true; 8515 } 8516 8517 case ARM::VLD1LNdAsm_8: 8518 case ARM::VLD1LNdAsm_16: 8519 case ARM::VLD1LNdAsm_32: { 8520 MCInst TmpInst; 8521 // Shuffle the operands around so the lane index operand is in the 8522 // right place. 
8523 unsigned Spacing; 8524 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8525 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8526 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8527 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8528 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8529 TmpInst.addOperand(Inst.getOperand(1)); // lane 8530 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8531 TmpInst.addOperand(Inst.getOperand(5)); 8532 Inst = TmpInst; 8533 return true; 8534 } 8535 8536 case ARM::VLD2LNdAsm_8: 8537 case ARM::VLD2LNdAsm_16: 8538 case ARM::VLD2LNdAsm_32: 8539 case ARM::VLD2LNqAsm_16: 8540 case ARM::VLD2LNqAsm_32: { 8541 MCInst TmpInst; 8542 // Shuffle the operands around so the lane index operand is in the 8543 // right place. 8544 unsigned Spacing; 8545 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8546 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8547 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8548 Spacing)); 8549 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8550 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8551 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8552 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8553 Spacing)); 8554 TmpInst.addOperand(Inst.getOperand(1)); // lane 8555 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8556 TmpInst.addOperand(Inst.getOperand(5)); 8557 Inst = TmpInst; 8558 return true; 8559 } 8560 8561 case ARM::VLD3LNdAsm_8: 8562 case ARM::VLD3LNdAsm_16: 8563 case ARM::VLD3LNdAsm_32: 8564 case ARM::VLD3LNqAsm_16: 8565 case ARM::VLD3LNqAsm_32: { 8566 MCInst TmpInst; 8567 // Shuffle the operands around so the lane index operand is in the 8568 // right place. 
8569 unsigned Spacing; 8570 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8571 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8572 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8573 Spacing)); 8574 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8575 Spacing * 2)); 8576 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8577 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8578 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8579 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8580 Spacing)); 8581 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8582 Spacing * 2)); 8583 TmpInst.addOperand(Inst.getOperand(1)); // lane 8584 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8585 TmpInst.addOperand(Inst.getOperand(5)); 8586 Inst = TmpInst; 8587 return true; 8588 } 8589 8590 case ARM::VLD4LNdAsm_8: 8591 case ARM::VLD4LNdAsm_16: 8592 case ARM::VLD4LNdAsm_32: 8593 case ARM::VLD4LNqAsm_16: 8594 case ARM::VLD4LNqAsm_32: { 8595 MCInst TmpInst; 8596 // Shuffle the operands around so the lane index operand is in the 8597 // right place. 
8598 unsigned Spacing; 8599 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8600 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8601 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8602 Spacing)); 8603 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8604 Spacing * 2)); 8605 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8606 Spacing * 3)); 8607 TmpInst.addOperand(Inst.getOperand(2)); // Rn 8608 TmpInst.addOperand(Inst.getOperand(3)); // alignment 8609 TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd) 8610 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8611 Spacing)); 8612 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8613 Spacing * 2)); 8614 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8615 Spacing * 3)); 8616 TmpInst.addOperand(Inst.getOperand(1)); // lane 8617 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8618 TmpInst.addOperand(Inst.getOperand(5)); 8619 Inst = TmpInst; 8620 return true; 8621 } 8622 8623 // VLD3DUP single 3-element structure to all lanes instructions. 
8624 case ARM::VLD3DUPdAsm_8: 8625 case ARM::VLD3DUPdAsm_16: 8626 case ARM::VLD3DUPdAsm_32: 8627 case ARM::VLD3DUPqAsm_8: 8628 case ARM::VLD3DUPqAsm_16: 8629 case ARM::VLD3DUPqAsm_32: { 8630 MCInst TmpInst; 8631 unsigned Spacing; 8632 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8633 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8634 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8635 Spacing)); 8636 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8637 Spacing * 2)); 8638 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8639 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8640 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8641 TmpInst.addOperand(Inst.getOperand(4)); 8642 Inst = TmpInst; 8643 return true; 8644 } 8645 8646 case ARM::VLD3DUPdWB_fixed_Asm_8: 8647 case ARM::VLD3DUPdWB_fixed_Asm_16: 8648 case ARM::VLD3DUPdWB_fixed_Asm_32: 8649 case ARM::VLD3DUPqWB_fixed_Asm_8: 8650 case ARM::VLD3DUPqWB_fixed_Asm_16: 8651 case ARM::VLD3DUPqWB_fixed_Asm_32: { 8652 MCInst TmpInst; 8653 unsigned Spacing; 8654 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8655 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8656 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8657 Spacing)); 8658 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8659 Spacing * 2)); 8660 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8661 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8662 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8663 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8664 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8665 TmpInst.addOperand(Inst.getOperand(4)); 8666 Inst = TmpInst; 8667 return true; 8668 } 8669 8670 case ARM::VLD3DUPdWB_register_Asm_8: 8671 case ARM::VLD3DUPdWB_register_Asm_16: 8672 case ARM::VLD3DUPdWB_register_Asm_32: 8673 case ARM::VLD3DUPqWB_register_Asm_8: 8674 case ARM::VLD3DUPqWB_register_Asm_16: 8675 case 
ARM::VLD3DUPqWB_register_Asm_32: { 8676 MCInst TmpInst; 8677 unsigned Spacing; 8678 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8679 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8680 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8681 Spacing)); 8682 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8683 Spacing * 2)); 8684 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8685 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8686 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8687 TmpInst.addOperand(Inst.getOperand(3)); // Rm 8688 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8689 TmpInst.addOperand(Inst.getOperand(5)); 8690 Inst = TmpInst; 8691 return true; 8692 } 8693 8694 // VLD3 multiple 3-element structure instructions. 8695 case ARM::VLD3dAsm_8: 8696 case ARM::VLD3dAsm_16: 8697 case ARM::VLD3dAsm_32: 8698 case ARM::VLD3qAsm_8: 8699 case ARM::VLD3qAsm_16: 8700 case ARM::VLD3qAsm_32: { 8701 MCInst TmpInst; 8702 unsigned Spacing; 8703 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8704 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8705 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8706 Spacing)); 8707 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8708 Spacing * 2)); 8709 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8710 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8711 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8712 TmpInst.addOperand(Inst.getOperand(4)); 8713 Inst = TmpInst; 8714 return true; 8715 } 8716 8717 case ARM::VLD3dWB_fixed_Asm_8: 8718 case ARM::VLD3dWB_fixed_Asm_16: 8719 case ARM::VLD3dWB_fixed_Asm_32: 8720 case ARM::VLD3qWB_fixed_Asm_8: 8721 case ARM::VLD3qWB_fixed_Asm_16: 8722 case ARM::VLD3qWB_fixed_Asm_32: { 8723 MCInst TmpInst; 8724 unsigned Spacing; 8725 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8726 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8727 
TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8728 Spacing)); 8729 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8730 Spacing * 2)); 8731 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8732 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8733 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8734 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8735 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8736 TmpInst.addOperand(Inst.getOperand(4)); 8737 Inst = TmpInst; 8738 return true; 8739 } 8740 8741 case ARM::VLD3dWB_register_Asm_8: 8742 case ARM::VLD3dWB_register_Asm_16: 8743 case ARM::VLD3dWB_register_Asm_32: 8744 case ARM::VLD3qWB_register_Asm_8: 8745 case ARM::VLD3qWB_register_Asm_16: 8746 case ARM::VLD3qWB_register_Asm_32: { 8747 MCInst TmpInst; 8748 unsigned Spacing; 8749 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8750 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8751 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8752 Spacing)); 8753 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8754 Spacing * 2)); 8755 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8756 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8757 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8758 TmpInst.addOperand(Inst.getOperand(3)); // Rm 8759 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8760 TmpInst.addOperand(Inst.getOperand(5)); 8761 Inst = TmpInst; 8762 return true; 8763 } 8764 8765 // VLD4DUP single 4-element structure to all lanes instructions.
8766 case ARM::VLD4DUPdAsm_8: 8767 case ARM::VLD4DUPdAsm_16: 8768 case ARM::VLD4DUPdAsm_32: 8769 case ARM::VLD4DUPqAsm_8: 8770 case ARM::VLD4DUPqAsm_16: 8771 case ARM::VLD4DUPqAsm_32: { 8772 MCInst TmpInst; 8773 unsigned Spacing; 8774 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8775 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8776 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8777 Spacing)); 8778 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8779 Spacing * 2)); 8780 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8781 Spacing * 3)); 8782 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8783 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8784 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8785 TmpInst.addOperand(Inst.getOperand(4)); 8786 Inst = TmpInst; 8787 return true; 8788 } 8789 8790 case ARM::VLD4DUPdWB_fixed_Asm_8: 8791 case ARM::VLD4DUPdWB_fixed_Asm_16: 8792 case ARM::VLD4DUPdWB_fixed_Asm_32: 8793 case ARM::VLD4DUPqWB_fixed_Asm_8: 8794 case ARM::VLD4DUPqWB_fixed_Asm_16: 8795 case ARM::VLD4DUPqWB_fixed_Asm_32: { 8796 MCInst TmpInst; 8797 unsigned Spacing; 8798 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8799 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8800 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8801 Spacing)); 8802 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8803 Spacing * 2)); 8804 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8805 Spacing * 3)); 8806 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8807 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8808 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8809 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8810 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8811 TmpInst.addOperand(Inst.getOperand(4)); 8812 Inst = TmpInst; 8813 return true; 8814 } 8815 8816 case 
ARM::VLD4DUPdWB_register_Asm_8: 8817 case ARM::VLD4DUPdWB_register_Asm_16: 8818 case ARM::VLD4DUPdWB_register_Asm_32: 8819 case ARM::VLD4DUPqWB_register_Asm_8: 8820 case ARM::VLD4DUPqWB_register_Asm_16: 8821 case ARM::VLD4DUPqWB_register_Asm_32: { 8822 MCInst TmpInst; 8823 unsigned Spacing; 8824 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8825 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8826 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8827 Spacing)); 8828 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8829 Spacing * 2)); 8830 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8831 Spacing * 3)); 8832 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8833 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8834 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8835 TmpInst.addOperand(Inst.getOperand(3)); // Rm 8836 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8837 TmpInst.addOperand(Inst.getOperand(5)); 8838 Inst = TmpInst; 8839 return true; 8840 } 8841 8842 // VLD4 multiple 4-element structure instructions. 
8843 case ARM::VLD4dAsm_8: 8844 case ARM::VLD4dAsm_16: 8845 case ARM::VLD4dAsm_32: 8846 case ARM::VLD4qAsm_8: 8847 case ARM::VLD4qAsm_16: 8848 case ARM::VLD4qAsm_32: { 8849 MCInst TmpInst; 8850 unsigned Spacing; 8851 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8852 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8853 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8854 Spacing)); 8855 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8856 Spacing * 2)); 8857 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8858 Spacing * 3)); 8859 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8860 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8861 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8862 TmpInst.addOperand(Inst.getOperand(4)); 8863 Inst = TmpInst; 8864 return true; 8865 } 8866 8867 case ARM::VLD4dWB_fixed_Asm_8: 8868 case ARM::VLD4dWB_fixed_Asm_16: 8869 case ARM::VLD4dWB_fixed_Asm_32: 8870 case ARM::VLD4qWB_fixed_Asm_8: 8871 case ARM::VLD4qWB_fixed_Asm_16: 8872 case ARM::VLD4qWB_fixed_Asm_32: { 8873 MCInst TmpInst; 8874 unsigned Spacing; 8875 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8876 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8877 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8878 Spacing)); 8879 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8880 Spacing * 2)); 8881 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8882 Spacing * 3)); 8883 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8884 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8885 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8886 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8887 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8888 TmpInst.addOperand(Inst.getOperand(4)); 8889 Inst = TmpInst; 8890 return true; 8891 } 8892 8893 case ARM::VLD4dWB_register_Asm_8: 8894 case 
ARM::VLD4dWB_register_Asm_16: 8895 case ARM::VLD4dWB_register_Asm_32: 8896 case ARM::VLD4qWB_register_Asm_8: 8897 case ARM::VLD4qWB_register_Asm_16: 8898 case ARM::VLD4qWB_register_Asm_32: { 8899 MCInst TmpInst; 8900 unsigned Spacing; 8901 TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing)); 8902 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8903 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8904 Spacing)); 8905 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8906 Spacing * 2)); 8907 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8908 Spacing * 3)); 8909 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8910 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8911 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8912 TmpInst.addOperand(Inst.getOperand(3)); // Rm 8913 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8914 TmpInst.addOperand(Inst.getOperand(5)); 8915 Inst = TmpInst; 8916 return true; 8917 } 8918 8919 // VST3 multiple 3-element structure instructions. 
8920 case ARM::VST3dAsm_8: 8921 case ARM::VST3dAsm_16: 8922 case ARM::VST3dAsm_32: 8923 case ARM::VST3qAsm_8: 8924 case ARM::VST3qAsm_16: 8925 case ARM::VST3qAsm_32: { 8926 MCInst TmpInst; 8927 unsigned Spacing; 8928 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8929 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8930 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8931 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8932 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8933 Spacing)); 8934 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8935 Spacing * 2)); 8936 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8937 TmpInst.addOperand(Inst.getOperand(4)); 8938 Inst = TmpInst; 8939 return true; 8940 } 8941 8942 case ARM::VST3dWB_fixed_Asm_8: 8943 case ARM::VST3dWB_fixed_Asm_16: 8944 case ARM::VST3dWB_fixed_Asm_32: 8945 case ARM::VST3qWB_fixed_Asm_8: 8946 case ARM::VST3qWB_fixed_Asm_16: 8947 case ARM::VST3qWB_fixed_Asm_32: { 8948 MCInst TmpInst; 8949 unsigned Spacing; 8950 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8951 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8952 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8953 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8954 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 8955 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8956 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8957 Spacing)); 8958 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8959 Spacing * 2)); 8960 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 8961 TmpInst.addOperand(Inst.getOperand(4)); 8962 Inst = TmpInst; 8963 return true; 8964 } 8965 8966 case ARM::VST3dWB_register_Asm_8: 8967 case ARM::VST3dWB_register_Asm_16: 8968 case ARM::VST3dWB_register_Asm_32: 8969 case ARM::VST3qWB_register_Asm_8: 8970 case ARM::VST3qWB_register_Asm_16: 8971 case ARM::VST3qWB_register_Asm_32: { 8972 MCInst TmpInst; 
8973 unsigned Spacing; 8974 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 8975 TmpInst.addOperand(Inst.getOperand(1)); // Rn 8976 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 8977 TmpInst.addOperand(Inst.getOperand(2)); // alignment 8978 TmpInst.addOperand(Inst.getOperand(3)); // Rm 8979 TmpInst.addOperand(Inst.getOperand(0)); // Vd 8980 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8981 Spacing)); 8982 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 8983 Spacing * 2)); 8984 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 8985 TmpInst.addOperand(Inst.getOperand(5)); 8986 Inst = TmpInst; 8987 return true; 8988 } 8989 8990 // VST4 multiple 4-element structure instructions. 8991 case ARM::VST4dAsm_8: 8992 case ARM::VST4dAsm_16: 8993 case ARM::VST4dAsm_32: 8994 case ARM::VST4qAsm_8: 8995 case ARM::VST4qAsm_16: 8996 case ARM::VST4qAsm_32: { 8997 MCInst TmpInst; 8998 unsigned Spacing; 8999 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 9000 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9001 TmpInst.addOperand(Inst.getOperand(2)); // alignment 9002 TmpInst.addOperand(Inst.getOperand(0)); // Vd 9003 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9004 Spacing)); 9005 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9006 Spacing * 2)); 9007 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9008 Spacing * 3)); 9009 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 9010 TmpInst.addOperand(Inst.getOperand(4)); 9011 Inst = TmpInst; 9012 return true; 9013 } 9014 9015 case ARM::VST4dWB_fixed_Asm_8: 9016 case ARM::VST4dWB_fixed_Asm_16: 9017 case ARM::VST4dWB_fixed_Asm_32: 9018 case ARM::VST4qWB_fixed_Asm_8: 9019 case ARM::VST4qWB_fixed_Asm_16: 9020 case ARM::VST4qWB_fixed_Asm_32: { 9021 MCInst TmpInst; 9022 unsigned Spacing; 9023 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 9024
TmpInst.addOperand(Inst.getOperand(1)); // Rn 9025 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 9026 TmpInst.addOperand(Inst.getOperand(2)); // alignment 9027 TmpInst.addOperand(MCOperand::createReg(0)); // Rm 9028 TmpInst.addOperand(Inst.getOperand(0)); // Vd 9029 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9030 Spacing)); 9031 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9032 Spacing * 2)); 9033 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9034 Spacing * 3)); 9035 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 9036 TmpInst.addOperand(Inst.getOperand(4)); 9037 Inst = TmpInst; 9038 return true; 9039 } 9040 9041 case ARM::VST4dWB_register_Asm_8: 9042 case ARM::VST4dWB_register_Asm_16: 9043 case ARM::VST4dWB_register_Asm_32: 9044 case ARM::VST4qWB_register_Asm_8: 9045 case ARM::VST4qWB_register_Asm_16: 9046 case ARM::VST4qWB_register_Asm_32: { 9047 MCInst TmpInst; 9048 unsigned Spacing; 9049 TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing)); 9050 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9051 TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn 9052 TmpInst.addOperand(Inst.getOperand(2)); // alignment 9053 TmpInst.addOperand(Inst.getOperand(3)); // Rm 9054 TmpInst.addOperand(Inst.getOperand(0)); // Vd 9055 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9056 Spacing)); 9057 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9058 Spacing * 2)); 9059 TmpInst.addOperand(MCOperand::createReg(Inst.getOperand(0).getReg() + 9060 Spacing * 3)); 9061 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 9062 TmpInst.addOperand(Inst.getOperand(5)); 9063 Inst = TmpInst; 9064 return true; 9065 } 9066 9067 // Handle encoding choice for the shift-immediate instructions. 
9068 case ARM::t2LSLri: 9069 case ARM::t2LSRri: 9070 case ARM::t2ASRri: 9071 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 9072 isARMLowRegister(Inst.getOperand(1).getReg()) && 9073 Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) && 9074 !HasWideQualifier) { 9075 unsigned NewOpc; 9076 switch (Inst.getOpcode()) { 9077 default: llvm_unreachable("unexpected opcode"); 9078 case ARM::t2LSLri: NewOpc = ARM::tLSLri; break; 9079 case ARM::t2LSRri: NewOpc = ARM::tLSRri; break; 9080 case ARM::t2ASRri: NewOpc = ARM::tASRri; break; 9081 } 9082 // The Thumb1 operands aren't in the same order. Awesome, eh? 9083 MCInst TmpInst; 9084 TmpInst.setOpcode(NewOpc); 9085 TmpInst.addOperand(Inst.getOperand(0)); 9086 TmpInst.addOperand(Inst.getOperand(5)); 9087 TmpInst.addOperand(Inst.getOperand(1)); 9088 TmpInst.addOperand(Inst.getOperand(2)); 9089 TmpInst.addOperand(Inst.getOperand(3)); 9090 TmpInst.addOperand(Inst.getOperand(4)); 9091 Inst = TmpInst; 9092 return true; 9093 } 9094 return false; 9095 9096 // Handle the Thumb2 mode MOV complex aliases. 9097 case ARM::t2MOVsr: 9098 case ARM::t2MOVSsr: { 9099 // Which instruction to expand to depends on the CCOut operand and 9100 // whether we're in an IT block if the register operands are low 9101 // registers. 9102 bool isNarrow = false; 9103 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 9104 isARMLowRegister(Inst.getOperand(1).getReg()) && 9105 isARMLowRegister(Inst.getOperand(2).getReg()) && 9106 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 9107 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr) && 9108 !HasWideQualifier) 9109 isNarrow = true; 9110 MCInst TmpInst; 9111 unsigned newOpc; 9112 switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) { 9113 default: llvm_unreachable("unexpected opcode!"); 9114 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break; 9115 case ARM_AM::lsr: newOpc = isNarrow ? 
ARM::tLSRrr : ARM::t2LSRrr; break; 9116 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break; 9117 case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR : ARM::t2RORrr; break; 9118 } 9119 TmpInst.setOpcode(newOpc); 9120 TmpInst.addOperand(Inst.getOperand(0)); // Rd 9121 if (isNarrow) 9122 TmpInst.addOperand(MCOperand::createReg( 9123 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 9124 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9125 TmpInst.addOperand(Inst.getOperand(2)); // Rm 9126 TmpInst.addOperand(Inst.getOperand(4)); // CondCode 9127 TmpInst.addOperand(Inst.getOperand(5)); 9128 if (!isNarrow) 9129 TmpInst.addOperand(MCOperand::createReg( 9130 Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0)); 9131 Inst = TmpInst; 9132 return true; 9133 } 9134 case ARM::t2MOVsi: 9135 case ARM::t2MOVSsi: { 9136 // Which instruction to expand to depends on the CCOut operand and 9137 // whether we're in an IT block if the register operands are low 9138 // registers. 9139 bool isNarrow = false; 9140 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 9141 isARMLowRegister(Inst.getOperand(1).getReg()) && 9142 inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi) && 9143 !HasWideQualifier) 9144 isNarrow = true; 9145 MCInst TmpInst; 9146 unsigned newOpc; 9147 unsigned Shift = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); 9148 unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()); 9149 bool isMov = false; 9150 // MOV rd, rm, LSL #0 is actually a MOV instruction 9151 if (Shift == ARM_AM::lsl && Amount == 0) { 9152 isMov = true; 9153 // The 16-bit encoding of MOV rd, rm, LSL #N is explicitly encoding T2 of 9154 // MOV (register) in the ARMv8-A and ARMv8-M manuals, and immediate 0 is 9155 // unpredictable in an IT block so the 32-bit encoding T3 has to be used 9156 // instead. 9157 if (inITBlock()) { 9158 isNarrow = false; 9159 } 9160 newOpc = isNarrow ? 
ARM::tMOVSr : ARM::t2MOVr; 9161 } else { 9162 switch(Shift) { 9163 default: llvm_unreachable("unexpected opcode!"); 9164 case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break; 9165 case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break; 9166 case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break; 9167 case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break; 9168 case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break; 9169 } 9170 } 9171 if (Amount == 32) Amount = 0; 9172 TmpInst.setOpcode(newOpc); 9173 TmpInst.addOperand(Inst.getOperand(0)); // Rd 9174 if (isNarrow && !isMov) 9175 TmpInst.addOperand(MCOperand::createReg( 9176 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 9177 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9178 if (newOpc != ARM::t2RRX && !isMov) 9179 TmpInst.addOperand(MCOperand::createImm(Amount)); 9180 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 9181 TmpInst.addOperand(Inst.getOperand(4)); 9182 if (!isNarrow) 9183 TmpInst.addOperand(MCOperand::createReg( 9184 Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0)); 9185 Inst = TmpInst; 9186 return true; 9187 } 9188 // Handle the ARM mode MOV complex aliases. 
9189 case ARM::ASRr: 9190 case ARM::LSRr: 9191 case ARM::LSLr: 9192 case ARM::RORr: { 9193 ARM_AM::ShiftOpc ShiftTy; 9194 switch(Inst.getOpcode()) { 9195 default: llvm_unreachable("unexpected opcode!"); 9196 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 9197 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 9198 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 9199 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 9200 } 9201 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 9202 MCInst TmpInst; 9203 TmpInst.setOpcode(ARM::MOVsr); 9204 TmpInst.addOperand(Inst.getOperand(0)); // Rd 9205 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9206 TmpInst.addOperand(Inst.getOperand(2)); // Rm 9207 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty 9208 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 9209 TmpInst.addOperand(Inst.getOperand(4)); 9210 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 9211 Inst = TmpInst; 9212 return true; 9213 } 9214 case ARM::ASRi: 9215 case ARM::LSRi: 9216 case ARM::LSLi: 9217 case ARM::RORi: { 9218 ARM_AM::ShiftOpc ShiftTy; 9219 switch(Inst.getOpcode()) { 9220 default: llvm_unreachable("unexpected opcode!"); 9221 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 9222 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 9223 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 9224 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 9225 } 9226 // A shift by zero is a plain MOVr, not a MOVsi. 9227 unsigned Amt = Inst.getOperand(2).getImm(); 9228 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 9229 // A shift by 32 should be encoded as 0 when permitted 9230 if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr)) 9231 Amt = 0; 9232 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 9233 MCInst TmpInst; 9234 TmpInst.setOpcode(Opc); 9235 TmpInst.addOperand(Inst.getOperand(0)); // Rd 9236 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9237 if (Opc == ARM::MOVsi) 9238 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty 9239 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 9240 TmpInst.addOperand(Inst.getOperand(4)); 9241 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 9242 Inst = TmpInst; 9243 return true; 9244 } 9245 case ARM::RRXi: { 9246 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 9247 MCInst TmpInst; 9248 TmpInst.setOpcode(ARM::MOVsi); 9249 TmpInst.addOperand(Inst.getOperand(0)); // Rd 9250 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9251 TmpInst.addOperand(MCOperand::createImm(Shifter)); // Shift value and ty 9252 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 9253 TmpInst.addOperand(Inst.getOperand(3)); 9254 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 9255 Inst = TmpInst; 9256 return true; 9257 } 9258 case ARM::t2LDMIA_UPD: { 9259 // If this is a load of a single register, then we should use 9260 // a post-indexed LDR instruction instead, per the ARM ARM. 
9261 if (Inst.getNumOperands() != 5) 9262 return false; 9263 MCInst TmpInst; 9264 TmpInst.setOpcode(ARM::t2LDR_POST); 9265 TmpInst.addOperand(Inst.getOperand(4)); // Rt 9266 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 9267 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9268 TmpInst.addOperand(MCOperand::createImm(4)); 9269 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 9270 TmpInst.addOperand(Inst.getOperand(3)); 9271 Inst = TmpInst; 9272 return true; 9273 } 9274 case ARM::t2STMDB_UPD: { 9275 // If this is a store of a single register, then we should use 9276 // a pre-indexed STR instruction instead, per the ARM ARM. 9277 if (Inst.getNumOperands() != 5) 9278 return false; 9279 MCInst TmpInst; 9280 TmpInst.setOpcode(ARM::t2STR_PRE); 9281 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 9282 TmpInst.addOperand(Inst.getOperand(4)); // Rt 9283 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9284 TmpInst.addOperand(MCOperand::createImm(-4)); 9285 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 9286 TmpInst.addOperand(Inst.getOperand(3)); 9287 Inst = TmpInst; 9288 return true; 9289 } 9290 case ARM::LDMIA_UPD: 9291 // If this is a load of a single register via a 'pop', then we should use 9292 // a post-indexed LDR instruction instead, per the ARM ARM. 
9293 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "pop" && 9294 Inst.getNumOperands() == 5) { 9295 MCInst TmpInst; 9296 TmpInst.setOpcode(ARM::LDR_POST_IMM); 9297 TmpInst.addOperand(Inst.getOperand(4)); // Rt 9298 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 9299 TmpInst.addOperand(Inst.getOperand(1)); // Rn 9300 TmpInst.addOperand(MCOperand::createReg(0)); // am2offset 9301 TmpInst.addOperand(MCOperand::createImm(4)); 9302 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 9303 TmpInst.addOperand(Inst.getOperand(3)); 9304 Inst = TmpInst; 9305 return true; 9306 } 9307 break; 9308 case ARM::STMDB_UPD: 9309 // If this is a store of a single register via a 'push', then we should use 9310 // a pre-indexed STR instruction instead, per the ARM ARM. 9311 if (static_cast<ARMOperand &>(*Operands[0]).getToken() == "push" && 9312 Inst.getNumOperands() == 5) { 9313 MCInst TmpInst; 9314 TmpInst.setOpcode(ARM::STR_PRE_IMM); 9315 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 9316 TmpInst.addOperand(Inst.getOperand(4)); // Rt 9317 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 9318 TmpInst.addOperand(MCOperand::createImm(-4)); 9319 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 9320 TmpInst.addOperand(Inst.getOperand(3)); 9321 Inst = TmpInst; 9322 } 9323 break; 9324 case ARM::t2ADDri12: 9325 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add" 9326 // mnemonic was used (not "addw"), encoding T3 is preferred. 9327 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "add" || 9328 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 9329 break; 9330 Inst.setOpcode(ARM::t2ADDri); 9331 Inst.addOperand(MCOperand::createReg(0)); // cc_out 9332 break; 9333 case ARM::t2SUBri12: 9334 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub" 9335 // mnemonic was used (not "subw"), encoding T3 is preferred. 
9336 if (static_cast<ARMOperand &>(*Operands[0]).getToken() != "sub" || 9337 ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1) 9338 break; 9339 Inst.setOpcode(ARM::t2SUBri); 9340 Inst.addOperand(MCOperand::createReg(0)); // cc_out 9341 break; 9342 case ARM::tADDi8: 9343 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 9344 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 9345 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 9346 // to encoding T1 if <Rd> is omitted." 9347 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 9348 Inst.setOpcode(ARM::tADDi3); 9349 return true; 9350 } 9351 break; 9352 case ARM::tSUBi8: 9353 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 9354 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 9355 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 9356 // to encoding T1 if <Rd> is omitted." 9357 if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 9358 Inst.setOpcode(ARM::tSUBi3); 9359 return true; 9360 } 9361 break; 9362 case ARM::t2ADDri: 9363 case ARM::t2SUBri: { 9364 // If the destination and first source operand are the same, and 9365 // the flags are compatible with the current IT status, use encoding T2 9366 // instead of T3. For compatibility with the system 'as'. Make sure the 9367 // wide encoding wasn't explicit. 9368 if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() || 9369 !isARMLowRegister(Inst.getOperand(0).getReg()) || 9370 (Inst.getOperand(2).isImm() && 9371 (unsigned)Inst.getOperand(2).getImm() > 255) || 9372 Inst.getOperand(5).getReg() != (inITBlock() ? 0 : ARM::CPSR) || 9373 HasWideQualifier) 9374 break; 9375 MCInst TmpInst; 9376 TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ? 
9377 ARM::tADDi8 : ARM::tSUBi8); 9378 TmpInst.addOperand(Inst.getOperand(0)); 9379 TmpInst.addOperand(Inst.getOperand(5)); 9380 TmpInst.addOperand(Inst.getOperand(0)); 9381 TmpInst.addOperand(Inst.getOperand(2)); 9382 TmpInst.addOperand(Inst.getOperand(3)); 9383 TmpInst.addOperand(Inst.getOperand(4)); 9384 Inst = TmpInst; 9385 return true; 9386 } 9387 case ARM::t2ADDrr: { 9388 // If the destination and first source operand are the same, and 9389 // there's no setting of the flags, use encoding T2 instead of T3. 9390 // Note that this is only for ADD, not SUB. This mirrors the system 9391 // 'as' behaviour. Also take advantage of ADD being commutative. 9392 // Make sure the wide encoding wasn't explicit. 9393 bool Swap = false; 9394 auto DestReg = Inst.getOperand(0).getReg(); 9395 bool Transform = DestReg == Inst.getOperand(1).getReg(); 9396 if (!Transform && DestReg == Inst.getOperand(2).getReg()) { 9397 Transform = true; 9398 Swap = true; 9399 } 9400 if (!Transform || 9401 Inst.getOperand(5).getReg() != 0 || 9402 HasWideQualifier) 9403 break; 9404 MCInst TmpInst; 9405 TmpInst.setOpcode(ARM::tADDhirr); 9406 TmpInst.addOperand(Inst.getOperand(0)); 9407 TmpInst.addOperand(Inst.getOperand(0)); 9408 TmpInst.addOperand(Inst.getOperand(Swap ? 1 : 2)); 9409 TmpInst.addOperand(Inst.getOperand(3)); 9410 TmpInst.addOperand(Inst.getOperand(4)); 9411 Inst = TmpInst; 9412 return true; 9413 } 9414 case ARM::tADDrSP: 9415 // If the non-SP source operand and the destination operand are not the 9416 // same, we need to use the 32-bit encoding if it's available. 9417 if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) { 9418 Inst.setOpcode(ARM::t2ADDrr); 9419 Inst.addOperand(MCOperand::createReg(0)); // cc_out 9420 return true; 9421 } 9422 break; 9423 case ARM::tB: 9424 // A Thumb conditional branch outside of an IT block is a tBcc. 
9425 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 9426 Inst.setOpcode(ARM::tBcc); 9427 return true; 9428 } 9429 break; 9430 case ARM::t2B: 9431 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 9432 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 9433 Inst.setOpcode(ARM::t2Bcc); 9434 return true; 9435 } 9436 break; 9437 case ARM::t2Bcc: 9438 // If the conditional is AL or we're in an IT block, we really want t2B. 9439 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 9440 Inst.setOpcode(ARM::t2B); 9441 return true; 9442 } 9443 break; 9444 case ARM::tBcc: 9445 // If the conditional is AL, we really want tB. 9446 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 9447 Inst.setOpcode(ARM::tB); 9448 return true; 9449 } 9450 break; 9451 case ARM::tLDMIA: { 9452 // If the register list contains any high registers, or if the writeback 9453 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 9454 // instead if we're in Thumb2. Otherwise, this should have generated 9455 // an error in validateInstruction(). 9456 unsigned Rn = Inst.getOperand(0).getReg(); 9457 bool hasWritebackToken = 9458 (static_cast<ARMOperand &>(*Operands[3]).isToken() && 9459 static_cast<ARMOperand &>(*Operands[3]).getToken() == "!"); 9460 bool listContainsBase; 9461 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 9462 (!listContainsBase && !hasWritebackToken) || 9463 (listContainsBase && hasWritebackToken)) { 9464 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 9465 assert(isThumbTwo()); 9466 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 9467 // If we're switching to the updating version, we need to insert 9468 // the writeback tied operand. 
9469 if (hasWritebackToken) 9470 Inst.insert(Inst.begin(), 9471 MCOperand::createReg(Inst.getOperand(0).getReg())); 9472 return true; 9473 } 9474 break; 9475 } 9476 case ARM::tSTMIA_UPD: { 9477 // If the register list contains any high registers, we need to use 9478 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 9479 // should have generated an error in validateInstruction(). 9480 unsigned Rn = Inst.getOperand(0).getReg(); 9481 bool listContainsBase; 9482 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 9483 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 9484 assert(isThumbTwo()); 9485 Inst.setOpcode(ARM::t2STMIA_UPD); 9486 return true; 9487 } 9488 break; 9489 } 9490 case ARM::tPOP: { 9491 bool listContainsBase; 9492 // If the register list contains any high registers, we need to use 9493 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 9494 // should have generated an error in validateInstruction(). 9495 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 9496 return false; 9497 assert(isThumbTwo()); 9498 Inst.setOpcode(ARM::t2LDMIA_UPD); 9499 // Add the base register and writeback operands. 9500 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); 9501 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); 9502 return true; 9503 } 9504 case ARM::tPUSH: { 9505 bool listContainsBase; 9506 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 9507 return false; 9508 assert(isThumbTwo()); 9509 Inst.setOpcode(ARM::t2STMDB_UPD); 9510 // Add the base register and writeback operands. 9511 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); 9512 Inst.insert(Inst.begin(), MCOperand::createReg(ARM::SP)); 9513 return true; 9514 } 9515 case ARM::t2MOVi: 9516 // If we can use the 16-bit encoding and the user didn't explicitly 9517 // request the 32-bit variant, transform it here. 
9518 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 9519 (Inst.getOperand(1).isImm() && 9520 (unsigned)Inst.getOperand(1).getImm() <= 255) && 9521 Inst.getOperand(4).getReg() == (inITBlock() ? 0 : ARM::CPSR) && 9522 !HasWideQualifier) { 9523 // The operands aren't in the same order for tMOVi8... 9524 MCInst TmpInst; 9525 TmpInst.setOpcode(ARM::tMOVi8); 9526 TmpInst.addOperand(Inst.getOperand(0)); 9527 TmpInst.addOperand(Inst.getOperand(4)); 9528 TmpInst.addOperand(Inst.getOperand(1)); 9529 TmpInst.addOperand(Inst.getOperand(2)); 9530 TmpInst.addOperand(Inst.getOperand(3)); 9531 Inst = TmpInst; 9532 return true; 9533 } 9534 break; 9535 9536 case ARM::t2MOVr: 9537 // If we can use the 16-bit encoding and the user didn't explicitly 9538 // request the 32-bit variant, transform it here. 9539 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 9540 isARMLowRegister(Inst.getOperand(1).getReg()) && 9541 Inst.getOperand(2).getImm() == ARMCC::AL && 9542 Inst.getOperand(4).getReg() == ARM::CPSR && 9543 !HasWideQualifier) { 9544 // The operands aren't the same for tMOV[S]r... (no cc_out) 9545 MCInst TmpInst; 9546 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 9547 TmpInst.addOperand(Inst.getOperand(0)); 9548 TmpInst.addOperand(Inst.getOperand(1)); 9549 TmpInst.addOperand(Inst.getOperand(2)); 9550 TmpInst.addOperand(Inst.getOperand(3)); 9551 Inst = TmpInst; 9552 return true; 9553 } 9554 break; 9555 9556 case ARM::t2SXTH: 9557 case ARM::t2SXTB: 9558 case ARM::t2UXTH: 9559 case ARM::t2UXTB: 9560 // If we can use the 16-bit encoding and the user didn't explicitly 9561 // request the 32-bit variant, transform it here. 
9562 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 9563 isARMLowRegister(Inst.getOperand(1).getReg()) && 9564 Inst.getOperand(2).getImm() == 0 && 9565 !HasWideQualifier) { 9566 unsigned NewOpc; 9567 switch (Inst.getOpcode()) { 9568 default: llvm_unreachable("Illegal opcode!"); 9569 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 9570 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 9571 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 9572 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 9573 } 9574 // The operands aren't the same for thumb1 (no rotate operand). 9575 MCInst TmpInst; 9576 TmpInst.setOpcode(NewOpc); 9577 TmpInst.addOperand(Inst.getOperand(0)); 9578 TmpInst.addOperand(Inst.getOperand(1)); 9579 TmpInst.addOperand(Inst.getOperand(3)); 9580 TmpInst.addOperand(Inst.getOperand(4)); 9581 Inst = TmpInst; 9582 return true; 9583 } 9584 break; 9585 9586 case ARM::MOVsi: { 9587 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm()); 9588 // rrx shifts and asr/lsr of #32 is encoded as 0 9589 if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr) 9590 return false; 9591 if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) { 9592 // Shifting by zero is accepted as a vanilla 'MOVr' 9593 MCInst TmpInst; 9594 TmpInst.setOpcode(ARM::MOVr); 9595 TmpInst.addOperand(Inst.getOperand(0)); 9596 TmpInst.addOperand(Inst.getOperand(1)); 9597 TmpInst.addOperand(Inst.getOperand(3)); 9598 TmpInst.addOperand(Inst.getOperand(4)); 9599 TmpInst.addOperand(Inst.getOperand(5)); 9600 Inst = TmpInst; 9601 return true; 9602 } 9603 return false; 9604 } 9605 case ARM::ANDrsi: 9606 case ARM::ORRrsi: 9607 case ARM::EORrsi: 9608 case ARM::BICrsi: 9609 case ARM::SUBrsi: 9610 case ARM::ADDrsi: { 9611 unsigned newOpc; 9612 ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm()); 9613 if (SOpc == ARM_AM::rrx) return false; 9614 switch (Inst.getOpcode()) { 9615 default: llvm_unreachable("unexpected opcode!"); 9616 case ARM::ANDrsi: newOpc = 
ARM::ANDrr; break; 9617 case ARM::ORRrsi: newOpc = ARM::ORRrr; break; 9618 case ARM::EORrsi: newOpc = ARM::EORrr; break; 9619 case ARM::BICrsi: newOpc = ARM::BICrr; break; 9620 case ARM::SUBrsi: newOpc = ARM::SUBrr; break; 9621 case ARM::ADDrsi: newOpc = ARM::ADDrr; break; 9622 } 9623 // If the shift is by zero, use the non-shifted instruction definition. 9624 // The exception is for right shifts, where 0 == 32 9625 if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 && 9626 !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) { 9627 MCInst TmpInst; 9628 TmpInst.setOpcode(newOpc); 9629 TmpInst.addOperand(Inst.getOperand(0)); 9630 TmpInst.addOperand(Inst.getOperand(1)); 9631 TmpInst.addOperand(Inst.getOperand(2)); 9632 TmpInst.addOperand(Inst.getOperand(4)); 9633 TmpInst.addOperand(Inst.getOperand(5)); 9634 TmpInst.addOperand(Inst.getOperand(6)); 9635 Inst = TmpInst; 9636 return true; 9637 } 9638 return false; 9639 } 9640 case ARM::ITasm: 9641 case ARM::t2IT: { 9642 // Set up the IT block state according to the IT instruction we just 9643 // matched. 9644 assert(!inITBlock() && "nested IT blocks?!"); 9645 startExplicitITBlock(ARMCC::CondCodes(Inst.getOperand(0).getImm()), 9646 Inst.getOperand(1).getImm()); 9647 break; 9648 } 9649 case ARM::t2LSLrr: 9650 case ARM::t2LSRrr: 9651 case ARM::t2ASRrr: 9652 case ARM::t2SBCrr: 9653 case ARM::t2RORrr: 9654 case ARM::t2BICrr: 9655 // Assemblers should use the narrow encodings of these instructions when permissible. 9656 if ((isARMLowRegister(Inst.getOperand(1).getReg()) && 9657 isARMLowRegister(Inst.getOperand(2).getReg())) && 9658 Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() && 9659 Inst.getOperand(5).getReg() == (inITBlock() ? 
0 : ARM::CPSR) && 9660 !HasWideQualifier) { 9661 unsigned NewOpc; 9662 switch (Inst.getOpcode()) { 9663 default: llvm_unreachable("unexpected opcode"); 9664 case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break; 9665 case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break; 9666 case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break; 9667 case ARM::t2SBCrr: NewOpc = ARM::tSBC; break; 9668 case ARM::t2RORrr: NewOpc = ARM::tROR; break; 9669 case ARM::t2BICrr: NewOpc = ARM::tBIC; break; 9670 } 9671 MCInst TmpInst; 9672 TmpInst.setOpcode(NewOpc); 9673 TmpInst.addOperand(Inst.getOperand(0)); 9674 TmpInst.addOperand(Inst.getOperand(5)); 9675 TmpInst.addOperand(Inst.getOperand(1)); 9676 TmpInst.addOperand(Inst.getOperand(2)); 9677 TmpInst.addOperand(Inst.getOperand(3)); 9678 TmpInst.addOperand(Inst.getOperand(4)); 9679 Inst = TmpInst; 9680 return true; 9681 } 9682 return false; 9683 9684 case ARM::t2ANDrr: 9685 case ARM::t2EORrr: 9686 case ARM::t2ADCrr: 9687 case ARM::t2ORRrr: 9688 // Assemblers should use the narrow encodings of these instructions when permissible. 9689 // These instructions are special in that they are commutable, so shorter encodings 9690 // are available more often. 9691 if ((isARMLowRegister(Inst.getOperand(1).getReg()) && 9692 isARMLowRegister(Inst.getOperand(2).getReg())) && 9693 (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() || 9694 Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) && 9695 Inst.getOperand(5).getReg() == (inITBlock() ? 
0 : ARM::CPSR) && 9696 !HasWideQualifier) { 9697 unsigned NewOpc; 9698 switch (Inst.getOpcode()) { 9699 default: llvm_unreachable("unexpected opcode"); 9700 case ARM::t2ADCrr: NewOpc = ARM::tADC; break; 9701 case ARM::t2ANDrr: NewOpc = ARM::tAND; break; 9702 case ARM::t2EORrr: NewOpc = ARM::tEOR; break; 9703 case ARM::t2ORRrr: NewOpc = ARM::tORR; break; 9704 } 9705 MCInst TmpInst; 9706 TmpInst.setOpcode(NewOpc); 9707 TmpInst.addOperand(Inst.getOperand(0)); 9708 TmpInst.addOperand(Inst.getOperand(5)); 9709 if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) { 9710 TmpInst.addOperand(Inst.getOperand(1)); 9711 TmpInst.addOperand(Inst.getOperand(2)); 9712 } else { 9713 TmpInst.addOperand(Inst.getOperand(2)); 9714 TmpInst.addOperand(Inst.getOperand(1)); 9715 } 9716 TmpInst.addOperand(Inst.getOperand(3)); 9717 TmpInst.addOperand(Inst.getOperand(4)); 9718 Inst = TmpInst; 9719 return true; 9720 } 9721 return false; 9722 case ARM::MVE_VPST: 9723 case ARM::MVE_VPTv16i8: 9724 case ARM::MVE_VPTv8i16: 9725 case ARM::MVE_VPTv4i32: 9726 case ARM::MVE_VPTv16u8: 9727 case ARM::MVE_VPTv8u16: 9728 case ARM::MVE_VPTv4u32: 9729 case ARM::MVE_VPTv16s8: 9730 case ARM::MVE_VPTv8s16: 9731 case ARM::MVE_VPTv4s32: 9732 case ARM::MVE_VPTv4f32: 9733 case ARM::MVE_VPTv8f16: 9734 case ARM::MVE_VPTv16i8r: 9735 case ARM::MVE_VPTv8i16r: 9736 case ARM::MVE_VPTv4i32r: 9737 case ARM::MVE_VPTv16u8r: 9738 case ARM::MVE_VPTv8u16r: 9739 case ARM::MVE_VPTv4u32r: 9740 case ARM::MVE_VPTv16s8r: 9741 case ARM::MVE_VPTv8s16r: 9742 case ARM::MVE_VPTv4s32r: 9743 case ARM::MVE_VPTv4f32r: 9744 case ARM::MVE_VPTv8f16r: { 9745 assert(!inVPTBlock() && "Nested VPT blocks are not allowed"); 9746 MCOperand &MO = Inst.getOperand(0); 9747 VPTState.Mask = MO.getImm(); 9748 VPTState.CurPosition = 0; 9749 break; 9750 } 9751 } 9752 return false; 9753 } 9754 9755 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 9756 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 9757 // 
suffix depending on whether they're in an IT block or not. 9758 unsigned Opc = Inst.getOpcode(); 9759 const MCInstrDesc &MCID = MII.get(Opc); 9760 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 9761 assert(MCID.hasOptionalDef() && 9762 "optionally flag setting instruction missing optional def operand"); 9763 assert(MCID.NumOperands == Inst.getNumOperands() && 9764 "operand count mismatch!"); 9765 // Find the optional-def operand (cc_out). 9766 unsigned OpNo; 9767 for (OpNo = 0; 9768 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 9769 ++OpNo) 9770 ; 9771 // If we're parsing Thumb1, reject it completely. 9772 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 9773 return Match_RequiresFlagSetting; 9774 // If we're parsing Thumb2, which form is legal depends on whether we're 9775 // in an IT block. 9776 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 9777 !inITBlock()) 9778 return Match_RequiresITBlock; 9779 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 9780 inITBlock()) 9781 return Match_RequiresNotITBlock; 9782 // LSL with zero immediate is not allowed in an IT block 9783 if (Opc == ARM::tLSLri && Inst.getOperand(3).getImm() == 0 && inITBlock()) 9784 return Match_RequiresNotITBlock; 9785 } else if (isThumbOne()) { 9786 // Some high-register supporting Thumb1 encodings only allow both registers 9787 // to be from r0-r7 when in Thumb2. 9788 if (Opc == ARM::tADDhirr && !hasV6MOps() && 9789 isARMLowRegister(Inst.getOperand(1).getReg()) && 9790 isARMLowRegister(Inst.getOperand(2).getReg())) 9791 return Match_RequiresThumb2; 9792 // Others only require ARMv6 or later. 
9793 else if (Opc == ARM::tMOVr && !hasV6Ops() && 9794 isARMLowRegister(Inst.getOperand(0).getReg()) && 9795 isARMLowRegister(Inst.getOperand(1).getReg())) 9796 return Match_RequiresV6; 9797 } 9798 9799 // Before ARMv8 the rules for when SP is allowed in t2MOVr are more complex 9800 // than the loop below can handle, so it uses the GPRnopc register class and 9801 // we do SP handling here. 9802 if (Opc == ARM::t2MOVr && !hasV8Ops()) 9803 { 9804 // SP as both source and destination is not allowed 9805 if (Inst.getOperand(0).getReg() == ARM::SP && 9806 Inst.getOperand(1).getReg() == ARM::SP) 9807 return Match_RequiresV8; 9808 // When flags-setting SP as either source or destination is not allowed 9809 if (Inst.getOperand(4).getReg() == ARM::CPSR && 9810 (Inst.getOperand(0).getReg() == ARM::SP || 9811 Inst.getOperand(1).getReg() == ARM::SP)) 9812 return Match_RequiresV8; 9813 } 9814 9815 switch (Inst.getOpcode()) { 9816 case ARM::VMRS: 9817 case ARM::VMSR: 9818 case ARM::VMRS_FPCXTS: 9819 case ARM::VMRS_FPCXTNS: 9820 case ARM::VMSR_FPCXTS: 9821 case ARM::VMSR_FPCXTNS: 9822 case ARM::VMRS_FPSCR_NZCVQC: 9823 case ARM::VMSR_FPSCR_NZCVQC: 9824 case ARM::FMSTAT: 9825 case ARM::VMRS_VPR: 9826 case ARM::VMRS_P0: 9827 case ARM::VMSR_VPR: 9828 case ARM::VMSR_P0: 9829 // Use of SP for VMRS/VMSR is only allowed in ARM mode with the exception of 9830 // ARMv8-A. 
9831 if (Inst.getOperand(0).isReg() && Inst.getOperand(0).getReg() == ARM::SP && 9832 (isThumb() && !hasV8Ops())) 9833 return Match_InvalidOperand; 9834 break; 9835 default: 9836 break; 9837 } 9838 9839 for (unsigned I = 0; I < MCID.NumOperands; ++I) 9840 if (MCID.OpInfo[I].RegClass == ARM::rGPRRegClassID) { 9841 // rGPRRegClass excludes PC, and also excluded SP before ARMv8 9842 if ((Inst.getOperand(I).getReg() == ARM::SP) && !hasV8Ops()) 9843 return Match_RequiresV8; 9844 else if (Inst.getOperand(I).getReg() == ARM::PC) 9845 return Match_InvalidOperand; 9846 } 9847 9848 return Match_Success; 9849 } 9850 9851 namespace llvm { 9852 9853 template <> inline bool IsCPSRDead<MCInst>(const MCInst *Instr) { 9854 return true; // In an assembly source, no need to second-guess 9855 } 9856 9857 } // end namespace llvm 9858 9859 // Returns true if Inst is unpredictable if it is in and IT block, but is not 9860 // the last instruction in the block. 9861 bool ARMAsmParser::isITBlockTerminator(MCInst &Inst) const { 9862 const MCInstrDesc &MCID = MII.get(Inst.getOpcode()); 9863 9864 // All branch & call instructions terminate IT blocks with the exception of 9865 // SVC. 9866 if (MCID.isTerminator() || (MCID.isCall() && Inst.getOpcode() != ARM::tSVC) || 9867 MCID.isReturn() || MCID.isBranch() || MCID.isIndirectBranch()) 9868 return true; 9869 9870 // Any arithmetic instruction which writes to the PC also terminates the IT 9871 // block. 9872 if (MCID.hasDefOfPhysReg(Inst, ARM::PC, *MRI)) 9873 return true; 9874 9875 return false; 9876 } 9877 9878 unsigned ARMAsmParser::MatchInstruction(OperandVector &Operands, MCInst &Inst, 9879 SmallVectorImpl<NearMissInfo> &NearMisses, 9880 bool MatchingInlineAsm, 9881 bool &EmitInITBlock, 9882 MCStreamer &Out) { 9883 // If we can't use an implicit IT block here, just match as normal. 
if (inExplicitITBlock() || !isThumbTwo() || !useImplicitITThumb())
    return MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);

  // Try to match the instruction in an extension of the current IT block (if
  // there is one).
  if (inImplicitITBlock()) {
    extendImplicitITBlock(ITState.Cond);
    if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
        Match_Success) {
      // The match succeeded, but we still have to check that the instruction
      // is valid in this implicit IT block.
      const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
      if (MCID.isPredicable()) {
        ARMCC::CondCodes InstCond =
            (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
                .getImm();
        ARMCC::CondCodes ITCond = currentITCond();
        if (InstCond == ITCond) {
          // Same condition as the block: just extend it.
          EmitInITBlock = true;
          return Match_Success;
        } else if (InstCond == ARMCC::getOppositeCondition(ITCond)) {
          // Opposite condition: flip the corresponding IT mask bit instead of
          // closing the block.
          invertCurrentITCondition();
          EmitInITBlock = true;
          return Match_Success;
        }
      }
    }
    // Extension didn't work out; undo the speculative extend above.
    rewindImplicitITPosition();
  }

  // Finish the current IT block, and try to match outside any IT block.
  flushPendingInstructions(Out);
  unsigned PlainMatchResult =
      MatchInstructionImpl(Operands, Inst, &NearMisses, MatchingInlineAsm);
  if (PlainMatchResult == Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      ARMCC::CondCodes InstCond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      // Some forms of the branch instruction have their own condition code
      // fields, so can be conditionally executed without an IT block.
      if (Inst.getOpcode() == ARM::tBcc || Inst.getOpcode() == ARM::t2Bcc) {
        EmitInITBlock = false;
        return Match_Success;
      }
      if (InstCond == ARMCC::AL) {
        // Unconditional instructions never need an IT block.
        EmitInITBlock = false;
        return Match_Success;
      }
    } else {
      // Unpredicable instruction: nothing more to check.
      EmitInITBlock = false;
      return Match_Success;
    }
  }

  // Try to match in a new IT block. The matcher doesn't check the actual
  // condition, so we create an IT block with a dummy condition, and fix it up
  // once we know the actual condition.
  startImplicitITBlock();
  if (MatchInstructionImpl(Operands, Inst, nullptr, MatchingInlineAsm) ==
      Match_Success) {
    const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
    if (MCID.isPredicable()) {
      // Patch the block's condition from the instruction that was matched.
      ITState.Cond =
          (ARMCC::CondCodes)Inst.getOperand(MCID.findFirstPredOperandIdx())
              .getImm();
      EmitInITBlock = true;
      return Match_Success;
    }
  }
  discardImplicitITBlock();

  // If none of these succeed, return the error we got when trying to match
  // outside any IT blocks.
  EmitInITBlock = false;
  return PlainMatchResult;
}

// Forward declarations of tablegen'erated helpers defined below.
static std::string ARMMnemonicSpellCheck(StringRef S, const FeatureBitset &FBS,
                                         unsigned VariantID = 0);

static const char *getSubtargetFeatureName(uint64_t Val);
bool ARMAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                           OperandVector &Operands,
                                           MCStreamer &Out, uint64_t &ErrorInfo,
                                           bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned MatchResult;
  // Set by MatchInstruction when the instruction must be buffered until its
  // (implicit) IT block is complete.
  bool PendConditionalInstruction = false;

  SmallVector<NearMissInfo, 4> NearMisses;
  MatchResult = MatchInstruction(Operands, Inst, NearMisses, MatchingInlineAsm,
                                 PendConditionalInstruction, Out);

  switch (MatchResult) {
  case Match_Success:
    LLVM_DEBUG(dbgs() << "Parsed as: ";
               Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
               dbgs() << "\n");

    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      forwardVPTPosition();
      return true;
    }

    { // processInstruction() updates inITBlock state, we need to save it away
      bool wasInITBlock = inITBlock();

      // Some instructions need post-processing to, for example, tweak which
      // encoding is selected. Loop on it while changes happen so the
      // individual transformations can chain off each other.
// E.g.,
      // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
      while (processInstruction(Inst, Operands, Out))
        LLVM_DEBUG(dbgs() << "Changed to: ";
                   Inst.dump_pretty(dbgs(), MII.getName(Inst.getOpcode()));
                   dbgs() << "\n");

      // Only after the instruction is fully processed, we can validate it
      if (wasInITBlock && hasV8Ops() && isThumb() &&
          !isV8EligibleForIT(&Inst)) {
        Warning(IDLoc, "deprecated instruction in IT block");
      }
    }

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();
    forwardVPTPosition();

    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
    // doesn't actually encode.
    if (Inst.getOpcode() == ARM::ITasm)
      return false;

    Inst.setLoc(IDLoc);
    if (PendConditionalInstruction) {
      // Buffer the instruction until the implicit IT block is finished; it is
      // flushed together with the IT instruction that covers it.
      PendingConditionalInsts.push_back(Inst);
      if (isITBlockFull() || isITBlockTerminator(Inst))
        flushPendingInstructions(Out);
    } else {
      Out.EmitInstruction(Inst, getSTI());
    }
    return false;
  case Match_NearMisses:
    ReportNearMisses(NearMisses, IDLoc, Operands);
    return true;
  case Match_MnemonicFail: {
    // Unknown mnemonic: offer a spelling suggestion based on the features
    // currently available.
    FeatureBitset FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
    std::string Suggestion = ARMMnemonicSpellCheck(
        ((ARMOperand &)*Operands[0]).getToken(), FBS);
    return Error(IDLoc, "invalid instruction" + Suggestion,
                 ((ARMOperand &)*Operands[0]).getLocRange());
  }
  }

  llvm_unreachable("Implement any new match types added!");
}

/// parseDirective parses the arm specific directives
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  const MCObjectFileInfo::Environment Format =
      getContext().getObjectFileInfo()->getObjectFileType();
  bool IsMachO = Format == MCObjectFileInfo::IsMachO;
  bool IsCOFF = Format == MCObjectFileInfo::IsCOFF;

  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    parseLiteralValues(4, DirectiveID.getLoc());
  else if (IDVal == ".short" || IDVal == ".hword")
    parseLiteralValues(2, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".arm")
    parseDirectiveARM(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    parseDirectiveSyntax(DirectiveID.getLoc());
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(DirectiveID.getLoc());
  else if (IDVal == ".fnend")
    parseDirectiveFnEnd(DirectiveID.getLoc());
  else if (IDVal == ".cantunwind")
    parseDirectiveCantUnwind(DirectiveID.getLoc());
  else if (IDVal == ".personality")
    parseDirectivePersonality(DirectiveID.getLoc());
  else if (IDVal == ".handlerdata")
    parseDirectiveHandlerData(DirectiveID.getLoc());
  else if (IDVal == ".setfp")
    parseDirectiveSetFP(DirectiveID.getLoc());
  else if (IDVal == ".pad")
    parseDirectivePad(DirectiveID.getLoc());
  else if (IDVal == ".save")
    parseDirectiveRegSave(DirectiveID.getLoc(), false);
  else if (IDVal == ".vsave")
    parseDirectiveRegSave(DirectiveID.getLoc(), true);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(DirectiveID.getLoc());
  else if (IDVal == ".even")
    parseDirectiveEven(DirectiveID.getLoc());
  else if (IDVal == ".personalityindex")
    parseDirectivePersonalityIndex(DirectiveID.getLoc());
  else if (IDVal == ".unwind_raw")
    parseDirectiveUnwindRaw(DirectiveID.getLoc());
  else if (IDVal == ".movsp")
    parseDirectiveMovSP(DirectiveID.getLoc());
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(DirectiveID.getLoc());
  else if (IDVal == ".align")
    return parseDirectiveAlign(DirectiveID.getLoc()); // Use Generic on failure.
  else if (IDVal == ".thumb_set")
    parseDirectiveThumbSet(DirectiveID.getLoc());
  else if (IDVal == ".inst")
    parseDirectiveInst(DirectiveID.getLoc());
  else if (IDVal == ".inst.n")
    parseDirectiveInst(DirectiveID.getLoc(), 'n');
  else if (IDVal == ".inst.w")
    parseDirectiveInst(DirectiveID.getLoc(), 'w');
  else if (!IsMachO && !IsCOFF) {
    // ELF-only directives (build attributes and unwind tables).
    if (IDVal == ".arch")
      parseDirectiveArch(DirectiveID.getLoc());
    else if (IDVal == ".cpu")
      parseDirectiveCPU(DirectiveID.getLoc());
    else if (IDVal == ".eabi_attribute")
      parseDirectiveEabiAttr(DirectiveID.getLoc());
    else if (IDVal == ".fpu")
      parseDirectiveFPU(DirectiveID.getLoc());
    else if (IDVal == ".fnstart")
      parseDirectiveFnStart(DirectiveID.getLoc());
    else if (IDVal == ".object_arch")
      parseDirectiveObjectArch(DirectiveID.getLoc());
    else if (IDVal == ".tlsdescseq")
      parseDirectiveTLSDescSeq(DirectiveID.getLoc());
    else
      return true; // Not ours; let the generic parser handle it.
    } else
    return true; // Not ours; let the generic parser handle it.
  return false;
}

/// parseLiteralValues
///  ::= .hword expression [, expression]*
///  ::= .short expression [, expression]*
///  ::= .word expression [, expression]*
bool ARMAsmParser::parseLiteralValues(unsigned Size, SMLoc L) {
  // Parse one comma-separated expression and emit it at width Size.
  auto parseOne = [&]() -> bool {
    const MCExpr *Value;
    if (getParser().parseExpression(Value))
      return true;
    getParser().getStreamer().EmitValue(Value, Size, L);
    return false;
  };
  return (parseMany(parseOne));
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if
(parseToken(AsmToken::EndOfStatement, "unexpected token in directive") || 10152 check(!hasThumb(), L, "target does not support Thumb mode")) 10153 return true; 10154 10155 if (!isThumb()) 10156 SwitchMode(); 10157 10158 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 10159 return false; 10160 } 10161 10162 /// parseDirectiveARM 10163 /// ::= .arm 10164 bool ARMAsmParser::parseDirectiveARM(SMLoc L) { 10165 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive") || 10166 check(!hasARM(), L, "target does not support ARM mode")) 10167 return true; 10168 10169 if (isThumb()) 10170 SwitchMode(); 10171 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 10172 return false; 10173 } 10174 10175 void ARMAsmParser::doBeforeLabelEmit(MCSymbol *Symbol) { 10176 // We need to flush the current implicit IT block on a label, because it is 10177 // not legal to branch into an IT block. 10178 flushPendingInstructions(getStreamer()); 10179 } 10180 10181 void ARMAsmParser::onLabelParsed(MCSymbol *Symbol) { 10182 if (NextSymbolIsThumb) { 10183 getParser().getStreamer().EmitThumbFunc(Symbol); 10184 NextSymbolIsThumb = false; 10185 } 10186 } 10187 10188 /// parseDirectiveThumbFunc 10189 /// ::= .thumbfunc symbol_name 10190 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 10191 MCAsmParser &Parser = getParser(); 10192 const auto Format = getContext().getObjectFileInfo()->getObjectFileType(); 10193 bool IsMachO = Format == MCObjectFileInfo::IsMachO; 10194 10195 // Darwin asm has (optionally) function name after .thumb_func direction 10196 // ELF doesn't 10197 10198 if (IsMachO) { 10199 if (Parser.getTok().is(AsmToken::Identifier) || 10200 Parser.getTok().is(AsmToken::String)) { 10201 MCSymbol *Func = getParser().getContext().getOrCreateSymbol( 10202 Parser.getTok().getIdentifier()); 10203 getParser().getStreamer().EmitThumbFunc(Func); 10204 Parser.Lex(); 10205 if (parseToken(AsmToken::EndOfStatement, 10206 "unexpected token in '.thumb_func' 
directive")) 10207 return true; 10208 return false; 10209 } 10210 } 10211 10212 if (parseToken(AsmToken::EndOfStatement, 10213 "unexpected token in '.thumb_func' directive")) 10214 return true; 10215 10216 NextSymbolIsThumb = true; 10217 return false; 10218 } 10219 10220 /// parseDirectiveSyntax 10221 /// ::= .syntax unified | divided 10222 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 10223 MCAsmParser &Parser = getParser(); 10224 const AsmToken &Tok = Parser.getTok(); 10225 if (Tok.isNot(AsmToken::Identifier)) { 10226 Error(L, "unexpected token in .syntax directive"); 10227 return false; 10228 } 10229 10230 StringRef Mode = Tok.getString(); 10231 Parser.Lex(); 10232 if (check(Mode == "divided" || Mode == "DIVIDED", L, 10233 "'.syntax divided' arm assembly not supported") || 10234 check(Mode != "unified" && Mode != "UNIFIED", L, 10235 "unrecognized syntax mode in .syntax directive") || 10236 parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) 10237 return true; 10238 10239 // TODO tell the MC streamer the mode 10240 // getParser().getStreamer().Emit???(); 10241 return false; 10242 } 10243 10244 /// parseDirectiveCode 10245 /// ::= .code 16 | 32 10246 bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 10247 MCAsmParser &Parser = getParser(); 10248 const AsmToken &Tok = Parser.getTok(); 10249 if (Tok.isNot(AsmToken::Integer)) 10250 return Error(L, "unexpected token in .code directive"); 10251 int64_t Val = Parser.getTok().getIntVal(); 10252 if (Val != 16 && Val != 32) { 10253 Error(L, "invalid operand to .code directive"); 10254 return false; 10255 } 10256 Parser.Lex(); 10257 10258 if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) 10259 return true; 10260 10261 if (Val == 16) { 10262 if (!hasThumb()) 10263 return Error(L, "target does not support Thumb mode"); 10264 10265 if (!isThumb()) 10266 SwitchMode(); 10267 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 10268 } else { 10269 if (!hasARM()) 10270 return 
Error(L, "target does not support ARM mode"); 10271 10272 if (isThumb()) 10273 SwitchMode(); 10274 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 10275 } 10276 10277 return false; 10278 } 10279 10280 /// parseDirectiveReq 10281 /// ::= name .req registername 10282 bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) { 10283 MCAsmParser &Parser = getParser(); 10284 Parser.Lex(); // Eat the '.req' token. 10285 unsigned Reg; 10286 SMLoc SRegLoc, ERegLoc; 10287 if (check(ParseRegister(Reg, SRegLoc, ERegLoc), SRegLoc, 10288 "register name expected") || 10289 parseToken(AsmToken::EndOfStatement, 10290 "unexpected input in .req directive.")) 10291 return true; 10292 10293 if (RegisterReqs.insert(std::make_pair(Name, Reg)).first->second != Reg) 10294 return Error(SRegLoc, 10295 "redefinition of '" + Name + "' does not match original."); 10296 10297 return false; 10298 } 10299 10300 /// parseDirectiveUneq 10301 /// ::= .unreq registername 10302 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) { 10303 MCAsmParser &Parser = getParser(); 10304 if (Parser.getTok().isNot(AsmToken::Identifier)) 10305 return Error(L, "unexpected input in .unreq directive."); 10306 RegisterReqs.erase(Parser.getTok().getIdentifier().lower()); 10307 Parser.Lex(); // Eat the identifier. 10308 if (parseToken(AsmToken::EndOfStatement, 10309 "unexpected input in '.unreq' directive")) 10310 return true; 10311 return false; 10312 } 10313 10314 // After changing arch/CPU, try to put the ARM/Thumb mode back to what it was 10315 // before, if supported by the new target, or emit mapping symbols for the mode 10316 // switch. 
10317 void ARMAsmParser::FixModeAfterArchChange(bool WasThumb, SMLoc Loc) { 10318 if (WasThumb != isThumb()) { 10319 if (WasThumb && hasThumb()) { 10320 // Stay in Thumb mode 10321 SwitchMode(); 10322 } else if (!WasThumb && hasARM()) { 10323 // Stay in ARM mode 10324 SwitchMode(); 10325 } else { 10326 // Mode switch forced, because the new arch doesn't support the old mode. 10327 getParser().getStreamer().EmitAssemblerFlag(isThumb() ? MCAF_Code16 10328 : MCAF_Code32); 10329 // Warn about the implcit mode switch. GAS does not switch modes here, 10330 // but instead stays in the old mode, reporting an error on any following 10331 // instructions as the mode does not exist on the target. 10332 Warning(Loc, Twine("new target does not support ") + 10333 (WasThumb ? "thumb" : "arm") + " mode, switching to " + 10334 (!WasThumb ? "thumb" : "arm") + " mode"); 10335 } 10336 } 10337 } 10338 10339 /// parseDirectiveArch 10340 /// ::= .arch token 10341 bool ARMAsmParser::parseDirectiveArch(SMLoc L) { 10342 StringRef Arch = getParser().parseStringToEndOfStatement().trim(); 10343 ARM::ArchKind ID = ARM::parseArch(Arch); 10344 10345 if (ID == ARM::ArchKind::INVALID) 10346 return Error(L, "Unknown arch name"); 10347 10348 bool WasThumb = isThumb(); 10349 Triple T; 10350 MCSubtargetInfo &STI = copySTI(); 10351 STI.setDefaultFeatures("", ("+" + ARM::getArchName(ID)).str()); 10352 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 10353 FixModeAfterArchChange(WasThumb, L); 10354 10355 getTargetStreamer().emitArch(ID); 10356 return false; 10357 } 10358 10359 /// parseDirectiveEabiAttr 10360 /// ::= .eabi_attribute int, int [, "str"] 10361 /// ::= .eabi_attribute Tag_name, int [, "str"] 10362 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) { 10363 MCAsmParser &Parser = getParser(); 10364 int64_t Tag; 10365 SMLoc TagLoc; 10366 TagLoc = Parser.getTok().getLoc(); 10367 if (Parser.getTok().is(AsmToken::Identifier)) { 10368 StringRef Name = 
Parser.getTok().getIdentifier(); 10369 Tag = ARMBuildAttrs::AttrTypeFromString(Name); 10370 if (Tag == -1) { 10371 Error(TagLoc, "attribute name not recognised: " + Name); 10372 return false; 10373 } 10374 Parser.Lex(); 10375 } else { 10376 const MCExpr *AttrExpr; 10377 10378 TagLoc = Parser.getTok().getLoc(); 10379 if (Parser.parseExpression(AttrExpr)) 10380 return true; 10381 10382 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(AttrExpr); 10383 if (check(!CE, TagLoc, "expected numeric constant")) 10384 return true; 10385 10386 Tag = CE->getValue(); 10387 } 10388 10389 if (Parser.parseToken(AsmToken::Comma, "comma expected")) 10390 return true; 10391 10392 StringRef StringValue = ""; 10393 bool IsStringValue = false; 10394 10395 int64_t IntegerValue = 0; 10396 bool IsIntegerValue = false; 10397 10398 if (Tag == ARMBuildAttrs::CPU_raw_name || Tag == ARMBuildAttrs::CPU_name) 10399 IsStringValue = true; 10400 else if (Tag == ARMBuildAttrs::compatibility) { 10401 IsStringValue = true; 10402 IsIntegerValue = true; 10403 } else if (Tag < 32 || Tag % 2 == 0) 10404 IsIntegerValue = true; 10405 else if (Tag % 2 == 1) 10406 IsStringValue = true; 10407 else 10408 llvm_unreachable("invalid tag type"); 10409 10410 if (IsIntegerValue) { 10411 const MCExpr *ValueExpr; 10412 SMLoc ValueExprLoc = Parser.getTok().getLoc(); 10413 if (Parser.parseExpression(ValueExpr)) 10414 return true; 10415 10416 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ValueExpr); 10417 if (!CE) 10418 return Error(ValueExprLoc, "expected numeric constant"); 10419 IntegerValue = CE->getValue(); 10420 } 10421 10422 if (Tag == ARMBuildAttrs::compatibility) { 10423 if (Parser.parseToken(AsmToken::Comma, "comma expected")) 10424 return true; 10425 } 10426 10427 if (IsStringValue) { 10428 if (Parser.getTok().isNot(AsmToken::String)) 10429 return Error(Parser.getTok().getLoc(), "bad string constant"); 10430 10431 StringValue = Parser.getTok().getStringContents(); 10432 Parser.Lex(); 10433 } 10434 10435 
if (Parser.parseToken(AsmToken::EndOfStatement, 10436 "unexpected token in '.eabi_attribute' directive")) 10437 return true; 10438 10439 if (IsIntegerValue && IsStringValue) { 10440 assert(Tag == ARMBuildAttrs::compatibility); 10441 getTargetStreamer().emitIntTextAttribute(Tag, IntegerValue, StringValue); 10442 } else if (IsIntegerValue) 10443 getTargetStreamer().emitAttribute(Tag, IntegerValue); 10444 else if (IsStringValue) 10445 getTargetStreamer().emitTextAttribute(Tag, StringValue); 10446 return false; 10447 } 10448 10449 /// parseDirectiveCPU 10450 /// ::= .cpu str 10451 bool ARMAsmParser::parseDirectiveCPU(SMLoc L) { 10452 StringRef CPU = getParser().parseStringToEndOfStatement().trim(); 10453 getTargetStreamer().emitTextAttribute(ARMBuildAttrs::CPU_name, CPU); 10454 10455 // FIXME: This is using table-gen data, but should be moved to 10456 // ARMTargetParser once that is table-gen'd. 10457 if (!getSTI().isCPUStringValid(CPU)) 10458 return Error(L, "Unknown CPU name"); 10459 10460 bool WasThumb = isThumb(); 10461 MCSubtargetInfo &STI = copySTI(); 10462 STI.setDefaultFeatures(CPU, ""); 10463 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 10464 FixModeAfterArchChange(WasThumb, L); 10465 10466 return false; 10467 } 10468 10469 /// parseDirectiveFPU 10470 /// ::= .fpu str 10471 bool ARMAsmParser::parseDirectiveFPU(SMLoc L) { 10472 SMLoc FPUNameLoc = getTok().getLoc(); 10473 StringRef FPU = getParser().parseStringToEndOfStatement().trim(); 10474 10475 unsigned ID = ARM::parseFPU(FPU); 10476 std::vector<StringRef> Features; 10477 if (!ARM::getFPUFeatures(ID, Features)) 10478 return Error(FPUNameLoc, "Unknown FPU name"); 10479 10480 MCSubtargetInfo &STI = copySTI(); 10481 for (auto Feature : Features) 10482 STI.ApplyFeatureFlag(Feature); 10483 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 10484 10485 getTargetStreamer().emitFPU(ID); 10486 return false; 10487 } 10488 10489 /// parseDirectiveFnStart 10490 /// ::= 
.fnstart 10491 bool ARMAsmParser::parseDirectiveFnStart(SMLoc L) { 10492 if (parseToken(AsmToken::EndOfStatement, 10493 "unexpected token in '.fnstart' directive")) 10494 return true; 10495 10496 if (UC.hasFnStart()) { 10497 Error(L, ".fnstart starts before the end of previous one"); 10498 UC.emitFnStartLocNotes(); 10499 return true; 10500 } 10501 10502 // Reset the unwind directives parser state 10503 UC.reset(); 10504 10505 getTargetStreamer().emitFnStart(); 10506 10507 UC.recordFnStart(L); 10508 return false; 10509 } 10510 10511 /// parseDirectiveFnEnd 10512 /// ::= .fnend 10513 bool ARMAsmParser::parseDirectiveFnEnd(SMLoc L) { 10514 if (parseToken(AsmToken::EndOfStatement, 10515 "unexpected token in '.fnend' directive")) 10516 return true; 10517 // Check the ordering of unwind directives 10518 if (!UC.hasFnStart()) 10519 return Error(L, ".fnstart must precede .fnend directive"); 10520 10521 // Reset the unwind directives parser state 10522 getTargetStreamer().emitFnEnd(); 10523 10524 UC.reset(); 10525 return false; 10526 } 10527 10528 /// parseDirectiveCantUnwind 10529 /// ::= .cantunwind 10530 bool ARMAsmParser::parseDirectiveCantUnwind(SMLoc L) { 10531 if (parseToken(AsmToken::EndOfStatement, 10532 "unexpected token in '.cantunwind' directive")) 10533 return true; 10534 10535 UC.recordCantUnwind(L); 10536 // Check the ordering of unwind directives 10537 if (check(!UC.hasFnStart(), L, ".fnstart must precede .cantunwind directive")) 10538 return true; 10539 10540 if (UC.hasHandlerData()) { 10541 Error(L, ".cantunwind can't be used with .handlerdata directive"); 10542 UC.emitHandlerDataLocNotes(); 10543 return true; 10544 } 10545 if (UC.hasPersonality()) { 10546 Error(L, ".cantunwind can't be used with .personality directive"); 10547 UC.emitPersonalityLocNotes(); 10548 return true; 10549 } 10550 10551 getTargetStreamer().emitCantUnwind(); 10552 return false; 10553 } 10554 10555 /// parseDirectivePersonality 10556 /// ::= .personality name 10557 bool 
ARMAsmParser::parseDirectivePersonality(SMLoc L) { 10558 MCAsmParser &Parser = getParser(); 10559 bool HasExistingPersonality = UC.hasPersonality(); 10560 10561 // Parse the name of the personality routine 10562 if (Parser.getTok().isNot(AsmToken::Identifier)) 10563 return Error(L, "unexpected input in .personality directive."); 10564 StringRef Name(Parser.getTok().getIdentifier()); 10565 Parser.Lex(); 10566 10567 if (parseToken(AsmToken::EndOfStatement, 10568 "unexpected token in '.personality' directive")) 10569 return true; 10570 10571 UC.recordPersonality(L); 10572 10573 // Check the ordering of unwind directives 10574 if (!UC.hasFnStart()) 10575 return Error(L, ".fnstart must precede .personality directive"); 10576 if (UC.cantUnwind()) { 10577 Error(L, ".personality can't be used with .cantunwind directive"); 10578 UC.emitCantUnwindLocNotes(); 10579 return true; 10580 } 10581 if (UC.hasHandlerData()) { 10582 Error(L, ".personality must precede .handlerdata directive"); 10583 UC.emitHandlerDataLocNotes(); 10584 return true; 10585 } 10586 if (HasExistingPersonality) { 10587 Error(L, "multiple personality directives"); 10588 UC.emitPersonalityLocNotes(); 10589 return true; 10590 } 10591 10592 MCSymbol *PR = getParser().getContext().getOrCreateSymbol(Name); 10593 getTargetStreamer().emitPersonality(PR); 10594 return false; 10595 } 10596 10597 /// parseDirectiveHandlerData 10598 /// ::= .handlerdata 10599 bool ARMAsmParser::parseDirectiveHandlerData(SMLoc L) { 10600 if (parseToken(AsmToken::EndOfStatement, 10601 "unexpected token in '.handlerdata' directive")) 10602 return true; 10603 10604 UC.recordHandlerData(L); 10605 // Check the ordering of unwind directives 10606 if (!UC.hasFnStart()) 10607 return Error(L, ".fnstart must precede .personality directive"); 10608 if (UC.cantUnwind()) { 10609 Error(L, ".handlerdata can't be used with .cantunwind directive"); 10610 UC.emitCantUnwindLocNotes(); 10611 return true; 10612 } 10613 10614 
getTargetStreamer().emitHandlerData(); 10615 return false; 10616 } 10617 10618 /// parseDirectiveSetFP 10619 /// ::= .setfp fpreg, spreg [, offset] 10620 bool ARMAsmParser::parseDirectiveSetFP(SMLoc L) { 10621 MCAsmParser &Parser = getParser(); 10622 // Check the ordering of unwind directives 10623 if (check(!UC.hasFnStart(), L, ".fnstart must precede .setfp directive") || 10624 check(UC.hasHandlerData(), L, 10625 ".setfp must precede .handlerdata directive")) 10626 return true; 10627 10628 // Parse fpreg 10629 SMLoc FPRegLoc = Parser.getTok().getLoc(); 10630 int FPReg = tryParseRegister(); 10631 10632 if (check(FPReg == -1, FPRegLoc, "frame pointer register expected") || 10633 Parser.parseToken(AsmToken::Comma, "comma expected")) 10634 return true; 10635 10636 // Parse spreg 10637 SMLoc SPRegLoc = Parser.getTok().getLoc(); 10638 int SPReg = tryParseRegister(); 10639 if (check(SPReg == -1, SPRegLoc, "stack pointer register expected") || 10640 check(SPReg != ARM::SP && SPReg != UC.getFPReg(), SPRegLoc, 10641 "register should be either $sp or the latest fp register")) 10642 return true; 10643 10644 // Update the frame pointer register 10645 UC.saveFPReg(FPReg); 10646 10647 // Parse offset 10648 int64_t Offset = 0; 10649 if (Parser.parseOptionalToken(AsmToken::Comma)) { 10650 if (Parser.getTok().isNot(AsmToken::Hash) && 10651 Parser.getTok().isNot(AsmToken::Dollar)) 10652 return Error(Parser.getTok().getLoc(), "'#' expected"); 10653 Parser.Lex(); // skip hash token. 
10654 10655 const MCExpr *OffsetExpr; 10656 SMLoc ExLoc = Parser.getTok().getLoc(); 10657 SMLoc EndLoc; 10658 if (getParser().parseExpression(OffsetExpr, EndLoc)) 10659 return Error(ExLoc, "malformed setfp offset"); 10660 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 10661 if (check(!CE, ExLoc, "setfp offset must be an immediate")) 10662 return true; 10663 Offset = CE->getValue(); 10664 } 10665 10666 if (Parser.parseToken(AsmToken::EndOfStatement)) 10667 return true; 10668 10669 getTargetStreamer().emitSetFP(static_cast<unsigned>(FPReg), 10670 static_cast<unsigned>(SPReg), Offset); 10671 return false; 10672 } 10673 10674 /// parseDirective 10675 /// ::= .pad offset 10676 bool ARMAsmParser::parseDirectivePad(SMLoc L) { 10677 MCAsmParser &Parser = getParser(); 10678 // Check the ordering of unwind directives 10679 if (!UC.hasFnStart()) 10680 return Error(L, ".fnstart must precede .pad directive"); 10681 if (UC.hasHandlerData()) 10682 return Error(L, ".pad must precede .handlerdata directive"); 10683 10684 // Parse the offset 10685 if (Parser.getTok().isNot(AsmToken::Hash) && 10686 Parser.getTok().isNot(AsmToken::Dollar)) 10687 return Error(Parser.getTok().getLoc(), "'#' expected"); 10688 Parser.Lex(); // skip hash token. 
10689 10690 const MCExpr *OffsetExpr; 10691 SMLoc ExLoc = Parser.getTok().getLoc(); 10692 SMLoc EndLoc; 10693 if (getParser().parseExpression(OffsetExpr, EndLoc)) 10694 return Error(ExLoc, "malformed pad offset"); 10695 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 10696 if (!CE) 10697 return Error(ExLoc, "pad offset must be an immediate"); 10698 10699 if (parseToken(AsmToken::EndOfStatement, 10700 "unexpected token in '.pad' directive")) 10701 return true; 10702 10703 getTargetStreamer().emitPad(CE->getValue()); 10704 return false; 10705 } 10706 10707 /// parseDirectiveRegSave 10708 /// ::= .save { registers } 10709 /// ::= .vsave { registers } 10710 bool ARMAsmParser::parseDirectiveRegSave(SMLoc L, bool IsVector) { 10711 // Check the ordering of unwind directives 10712 if (!UC.hasFnStart()) 10713 return Error(L, ".fnstart must precede .save or .vsave directives"); 10714 if (UC.hasHandlerData()) 10715 return Error(L, ".save or .vsave must precede .handlerdata directive"); 10716 10717 // RAII object to make sure parsed operands are deleted. 10718 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> Operands; 10719 10720 // Parse the register list 10721 if (parseRegisterList(Operands) || 10722 parseToken(AsmToken::EndOfStatement, "unexpected token in directive")) 10723 return true; 10724 ARMOperand &Op = (ARMOperand &)*Operands[0]; 10725 if (!IsVector && !Op.isRegList()) 10726 return Error(L, ".save expects GPR registers"); 10727 if (IsVector && !Op.isDPRRegList()) 10728 return Error(L, ".vsave expects DPR registers"); 10729 10730 getTargetStreamer().emitRegSave(Op.getRegList(), IsVector); 10731 return false; 10732 } 10733 10734 /// parseDirectiveInst 10735 /// ::= .inst opcode [, ...] 10736 /// ::= .inst.n opcode [, ...] 10737 /// ::= .inst.w opcode [, ...] 
// Parse .inst / .inst.n / .inst.w: emit raw instruction encodings. Suffix is
// '\0' (none), 'n' (narrow, 16-bit) or 'w' (wide, 32-bit).
bool ARMAsmParser::parseDirectiveInst(SMLoc Loc, char Suffix) {
  int Width = 4;

  if (isThumb()) {
    switch (Suffix) {
    case 'n':
      Width = 2;
      break;
    case 'w':
      break;
    default:
      // No suffix in Thumb mode: width must be guessed per operand below.
      Width = 0;
      break;
    }
  } else {
    if (Suffix)
      return Error(Loc, "width suffixes are invalid in ARM mode");
  }

  auto parseOne = [&]() -> bool {
    const MCExpr *Expr;
    if (getParser().parseExpression(Expr))
      return true;
    const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
    if (!Value) {
      return Error(Loc, "expected constant expression");
    }

    char CurSuffix = Suffix;
    switch (Width) {
    case 2:
      if (Value->getValue() > 0xffff)
        return Error(Loc, "inst.n operand is too big, use inst.w instead");
      break;
    case 4:
      if (Value->getValue() > 0xffffffff)
        return Error(Loc, StringRef(Suffix ? "inst.w" : "inst") +
                              " operand is too big");
      break;
    case 0:
      // Thumb mode, no width indicated. Guess from the opcode, if possible.
      if (Value->getValue() < 0xe800)
        CurSuffix = 'n';
      else if (Value->getValue() >= 0xe8000000)
        CurSuffix = 'w';
      else
        return Error(Loc, "cannot determine Thumb instruction size, "
                          "use inst.n/inst.w instead");
      break;
    default:
      llvm_unreachable("only supported widths are 2 and 4");
    }

    getTargetStreamer().emitInst(Value->getValue(), CurSuffix);
    return false;
  };

  // At least one operand is required.
  if (parseOptionalToken(AsmToken::EndOfStatement))
    return Error(Loc, "expected expression following directive");
  if (parseMany(parseOne))
    return true;
  return false;
}

/// parseDirectiveLtorg
///  ::= .ltorg | .pool
bool ARMAsmParser::parseDirectiveLtorg(SMLoc L) {
  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;
  // Dump the literals collected so far at the current location.
  getTargetStreamer().emitCurrentConstantPool();
  return false;
}

/// parseDirectiveEven
///  ::= .even — align to a 2-byte boundary.
bool ARMAsmParser::parseDirectiveEven(SMLoc L) {
  const MCSection *Section = getStreamer().getCurrentSectionOnly();

  if (parseToken(AsmToken::EndOfStatement, "unexpected token in directive"))
    return true;

  if (!Section) {
    // No section yet: set up the default sections first.
    getStreamer().InitSections(false);
    Section = getStreamer().getCurrentSectionOnly();
  }

  assert(Section && "must have section to emit alignment");
  if (Section->UseCodeAlign())
    getStreamer().EmitCodeAlignment(2);
  else
    getStreamer().EmitValueToAlignment(2);

  return false;
}

/// parseDirectivePersonalityIndex
///  ::= .personalityindex index
bool ARMAsmParser::parseDirectivePersonalityIndex(SMLoc L) {
  MCAsmParser &Parser = getParser();
  bool HasExistingPersonality = UC.hasPersonality();

  const MCExpr *IndexExpression;
  SMLoc IndexLoc = Parser.getTok().getLoc();
  if (Parser.parseExpression(IndexExpression) ||
      parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.personalityindex' directive")) {
    return true;
  }

  UC.recordPersonalityIndex(L);

  // Check the ordering of unwind directives.
  if (!UC.hasFnStart()) {
    return Error(L, ".fnstart must precede .personalityindex directive");
  }
  if (UC.cantUnwind()) {
    Error(L, ".personalityindex cannot be used with .cantunwind");
    UC.emitCantUnwindLocNotes();
    return true;
  }
  if (UC.hasHandlerData()) {
    Error(L, ".personalityindex must precede .handlerdata directive");
    UC.emitHandlerDataLocNotes();
    return true;
  }
  if (HasExistingPersonality) {
    Error(L, "multiple personality directives");
    UC.emitPersonalityLocNotes();
    return true;
  }

  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(IndexExpression);
  if (!CE)
    return Error(IndexLoc, "index must be a constant number");
  if (CE->getValue() < 0 || CE->getValue() >= ARM::EHABI::NUM_PERSONALITY_INDEX)
    return Error(IndexLoc,
                 "personality routine index should be in range [0-3]");

  getTargetStreamer().emitPersonalityIndex(CE->getValue());
  return false;
}

/// parseDirectiveUnwindRaw
///  ::= .unwind_raw offset, opcode [, opcode...]
10879 bool ARMAsmParser::parseDirectiveUnwindRaw(SMLoc L) { 10880 MCAsmParser &Parser = getParser(); 10881 int64_t StackOffset; 10882 const MCExpr *OffsetExpr; 10883 SMLoc OffsetLoc = getLexer().getLoc(); 10884 10885 if (!UC.hasFnStart()) 10886 return Error(L, ".fnstart must precede .unwind_raw directives"); 10887 if (getParser().parseExpression(OffsetExpr)) 10888 return Error(OffsetLoc, "expected expression"); 10889 10890 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 10891 if (!CE) 10892 return Error(OffsetLoc, "offset must be a constant"); 10893 10894 StackOffset = CE->getValue(); 10895 10896 if (Parser.parseToken(AsmToken::Comma, "expected comma")) 10897 return true; 10898 10899 SmallVector<uint8_t, 16> Opcodes; 10900 10901 auto parseOne = [&]() -> bool { 10902 const MCExpr *OE; 10903 SMLoc OpcodeLoc = getLexer().getLoc(); 10904 if (check(getLexer().is(AsmToken::EndOfStatement) || 10905 Parser.parseExpression(OE), 10906 OpcodeLoc, "expected opcode expression")) 10907 return true; 10908 const MCConstantExpr *OC = dyn_cast<MCConstantExpr>(OE); 10909 if (!OC) 10910 return Error(OpcodeLoc, "opcode value must be a constant"); 10911 const int64_t Opcode = OC->getValue(); 10912 if (Opcode & ~0xff) 10913 return Error(OpcodeLoc, "invalid opcode"); 10914 Opcodes.push_back(uint8_t(Opcode)); 10915 return false; 10916 }; 10917 10918 // Must have at least 1 element 10919 SMLoc OpcodeLoc = getLexer().getLoc(); 10920 if (parseOptionalToken(AsmToken::EndOfStatement)) 10921 return Error(OpcodeLoc, "expected opcode expression"); 10922 if (parseMany(parseOne)) 10923 return true; 10924 10925 getTargetStreamer().emitUnwindRaw(StackOffset, Opcodes); 10926 return false; 10927 } 10928 10929 /// parseDirectiveTLSDescSeq 10930 /// ::= .tlsdescseq tls-variable 10931 bool ARMAsmParser::parseDirectiveTLSDescSeq(SMLoc L) { 10932 MCAsmParser &Parser = getParser(); 10933 10934 if (getLexer().isNot(AsmToken::Identifier)) 10935 return TokError("expected variable after 
'.tlsdescseq' directive"); 10936 10937 const MCSymbolRefExpr *SRE = 10938 MCSymbolRefExpr::create(Parser.getTok().getIdentifier(), 10939 MCSymbolRefExpr::VK_ARM_TLSDESCSEQ, getContext()); 10940 Lex(); 10941 10942 if (parseToken(AsmToken::EndOfStatement, 10943 "unexpected token in '.tlsdescseq' directive")) 10944 return true; 10945 10946 getTargetStreamer().AnnotateTLSDescriptorSequence(SRE); 10947 return false; 10948 } 10949 10950 /// parseDirectiveMovSP 10951 /// ::= .movsp reg [, #offset] 10952 bool ARMAsmParser::parseDirectiveMovSP(SMLoc L) { 10953 MCAsmParser &Parser = getParser(); 10954 if (!UC.hasFnStart()) 10955 return Error(L, ".fnstart must precede .movsp directives"); 10956 if (UC.getFPReg() != ARM::SP) 10957 return Error(L, "unexpected .movsp directive"); 10958 10959 SMLoc SPRegLoc = Parser.getTok().getLoc(); 10960 int SPReg = tryParseRegister(); 10961 if (SPReg == -1) 10962 return Error(SPRegLoc, "register expected"); 10963 if (SPReg == ARM::SP || SPReg == ARM::PC) 10964 return Error(SPRegLoc, "sp and pc are not permitted in .movsp directive"); 10965 10966 int64_t Offset = 0; 10967 if (Parser.parseOptionalToken(AsmToken::Comma)) { 10968 if (Parser.parseToken(AsmToken::Hash, "expected #constant")) 10969 return true; 10970 10971 const MCExpr *OffsetExpr; 10972 SMLoc OffsetLoc = Parser.getTok().getLoc(); 10973 10974 if (Parser.parseExpression(OffsetExpr)) 10975 return Error(OffsetLoc, "malformed offset expression"); 10976 10977 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(OffsetExpr); 10978 if (!CE) 10979 return Error(OffsetLoc, "offset must be an immediate constant"); 10980 10981 Offset = CE->getValue(); 10982 } 10983 10984 if (parseToken(AsmToken::EndOfStatement, 10985 "unexpected token in '.movsp' directive")) 10986 return true; 10987 10988 getTargetStreamer().emitMovSP(SPReg, Offset); 10989 UC.saveFPReg(SPReg); 10990 10991 return false; 10992 } 10993 10994 /// parseDirectiveObjectArch 10995 /// ::= .object_arch name 10996 bool 
ARMAsmParser::parseDirectiveObjectArch(SMLoc L) {
  MCAsmParser &Parser = getParser();
  if (getLexer().isNot(AsmToken::Identifier))
    return Error(getLexer().getLoc(), "unexpected token");

  StringRef Arch = Parser.getTok().getString();
  SMLoc ArchLoc = Parser.getTok().getLoc();
  Lex();

  // Map the textual architecture name onto the ArchKind enum.
  ARM::ArchKind ID = ARM::parseArch(Arch);

  if (ID == ARM::ArchKind::INVALID)
    return Error(ArchLoc, "unknown architecture '" + Arch + "'");
  if (parseToken(AsmToken::EndOfStatement))
    return true;

  getTargetStreamer().emitObjectArch(ID);
  return false;
}

/// parseDirectiveAlign
///   ::= .align
bool ARMAsmParser::parseDirectiveAlign(SMLoc L) {
  // NOTE: if this is not the end of the statement, fall back to the target
  // agnostic handling for this directive which will correctly handle this.
  if (parseOptionalToken(AsmToken::EndOfStatement)) {
    // '.align' is target specifically handled to mean 2**2 byte alignment.
    const MCSection *Section = getStreamer().getCurrentSectionOnly();
    assert(Section && "must have section to emit alignment");
    // Code sections pad with NOPs; data sections pad with zero bytes.
    if (Section->UseCodeAlign())
      getStreamer().EmitCodeAlignment(4, 0);
    else
      getStreamer().EmitValueToAlignment(4, 0, 1, 0);
    return false;
  }
  // Returning true here defers the directive to the generic parser.
  return true;
}

/// parseDirectiveThumbSet
///  ::= .thumb_set name, value
bool ARMAsmParser::parseDirectiveThumbSet(SMLoc L) {
  MCAsmParser &Parser = getParser();

  StringRef Name;
  if (check(Parser.parseIdentifier(Name),
            "expected identifier after '.thumb_set'") ||
      parseToken(AsmToken::Comma, "expected comma after name '" + Name + "'"))
    return true;

  MCSymbol *Sym;
  const MCExpr *Value;
  if (MCParserUtils::parseAssignmentExpression(Name, /* allow_redef */ true,
                                               Parser, Sym, Value))
    return true;

  // Like .set, but the target streamer additionally marks the symbol as a
  // Thumb function.
  getTargetStreamer().emitThumbSet(Sym, Value);
  return false;
}

/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(getTheARMLETarget());
  RegisterMCAsmParser<ARMAsmParser> Y(getTheARMBETarget());
  RegisterMCAsmParser<ARMAsmParser> A(getTheThumbLETarget());
  RegisterMCAsmParser<ARMAsmParser> B(getTheThumbBETarget());
}

#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "ARMGenAsmMatcher.inc"

// Some diagnostics need to vary with subtarget features, so they are handled
// here. For example, the DPR class has either 16 or 32 registers, depending
// on the FPU available.
const char *
ARMAsmParser::getCustomOperandDiag(ARMMatchResultTy MatchError) {
  switch (MatchError) {
  // rGPR contains sp starting with ARMv8.
  case Match_rGPR:
    return hasV8Ops() ?
"operand must be a register in range [r0, r14]" 11078 : "operand must be a register in range [r0, r12] or r14"; 11079 // DPR contains 16 registers for some FPUs, and 32 for others. 11080 case Match_DPR: 11081 return hasD32() ? "operand must be a register in range [d0, d31]" 11082 : "operand must be a register in range [d0, d15]"; 11083 case Match_DPR_RegList: 11084 return hasD32() ? "operand must be a list of registers in range [d0, d31]" 11085 : "operand must be a list of registers in range [d0, d15]"; 11086 11087 // For all other diags, use the static string from tablegen. 11088 default: 11089 return getMatchKindDiag(MatchError); 11090 } 11091 } 11092 11093 // Process the list of near-misses, throwing away ones we don't want to report 11094 // to the user, and converting the rest to a source location and string that 11095 // should be reported. 11096 void 11097 ARMAsmParser::FilterNearMisses(SmallVectorImpl<NearMissInfo> &NearMissesIn, 11098 SmallVectorImpl<NearMissMessage> &NearMissesOut, 11099 SMLoc IDLoc, OperandVector &Operands) { 11100 // TODO: If operand didn't match, sub in a dummy one and run target 11101 // predicate, so that we can avoid reporting near-misses that are invalid? 11102 // TODO: Many operand types dont have SuperClasses set, so we report 11103 // redundant ones. 11104 // TODO: Some operands are superclasses of registers (e.g. 11105 // MCK_RegShiftedImm), we don't have any way to represent that currently. 11106 // TODO: This is not all ARM-specific, can some of it be factored out? 11107 11108 // Record some information about near-misses that we have already seen, so 11109 // that we can avoid reporting redundant ones. For example, if there are 11110 // variants of an instruction that take 8- and 16-bit immediates, we want 11111 // to only report the widest one. 
11112 std::multimap<unsigned, unsigned> OperandMissesSeen; 11113 SmallSet<FeatureBitset, 4> FeatureMissesSeen; 11114 bool ReportedTooFewOperands = false; 11115 11116 // Process the near-misses in reverse order, so that we see more general ones 11117 // first, and so can avoid emitting more specific ones. 11118 for (NearMissInfo &I : reverse(NearMissesIn)) { 11119 switch (I.getKind()) { 11120 case NearMissInfo::NearMissOperand: { 11121 SMLoc OperandLoc = 11122 ((ARMOperand &)*Operands[I.getOperandIndex()]).getStartLoc(); 11123 const char *OperandDiag = 11124 getCustomOperandDiag((ARMMatchResultTy)I.getOperandError()); 11125 11126 // If we have already emitted a message for a superclass, don't also report 11127 // the sub-class. We consider all operand classes that we don't have a 11128 // specialised diagnostic for to be equal for the propose of this check, 11129 // so that we don't report the generic error multiple times on the same 11130 // operand. 11131 unsigned DupCheckMatchClass = OperandDiag ? 
I.getOperandClass() : ~0U; 11132 auto PrevReports = OperandMissesSeen.equal_range(I.getOperandIndex()); 11133 if (std::any_of(PrevReports.first, PrevReports.second, 11134 [DupCheckMatchClass]( 11135 const std::pair<unsigned, unsigned> Pair) { 11136 if (DupCheckMatchClass == ~0U || Pair.second == ~0U) 11137 return Pair.second == DupCheckMatchClass; 11138 else 11139 return isSubclass((MatchClassKind)DupCheckMatchClass, 11140 (MatchClassKind)Pair.second); 11141 })) 11142 break; 11143 OperandMissesSeen.insert( 11144 std::make_pair(I.getOperandIndex(), DupCheckMatchClass)); 11145 11146 NearMissMessage Message; 11147 Message.Loc = OperandLoc; 11148 if (OperandDiag) { 11149 Message.Message = OperandDiag; 11150 } else if (I.getOperandClass() == InvalidMatchClass) { 11151 Message.Message = "too many operands for instruction"; 11152 } else { 11153 Message.Message = "invalid operand for instruction"; 11154 LLVM_DEBUG( 11155 dbgs() << "Missing diagnostic string for operand class " 11156 << getMatchClassName((MatchClassKind)I.getOperandClass()) 11157 << I.getOperandClass() << ", error " << I.getOperandError() 11158 << ", opcode " << MII.getName(I.getOpcode()) << "\n"); 11159 } 11160 NearMissesOut.emplace_back(Message); 11161 break; 11162 } 11163 case NearMissInfo::NearMissFeature: { 11164 const FeatureBitset &MissingFeatures = I.getFeatures(); 11165 // Don't report the same set of features twice. 11166 if (FeatureMissesSeen.count(MissingFeatures)) 11167 break; 11168 FeatureMissesSeen.insert(MissingFeatures); 11169 11170 // Special case: don't report a feature set which includes arm-mode for 11171 // targets that don't have ARM mode. 11172 if (MissingFeatures.test(Feature_IsARMBit) && !hasARM()) 11173 break; 11174 // Don't report any near-misses that both require switching instruction 11175 // set, and adding other subtarget features. 
11176 if (isThumb() && MissingFeatures.test(Feature_IsARMBit) && 11177 MissingFeatures.count() > 1) 11178 break; 11179 if (!isThumb() && MissingFeatures.test(Feature_IsThumbBit) && 11180 MissingFeatures.count() > 1) 11181 break; 11182 if (!isThumb() && MissingFeatures.test(Feature_IsThumb2Bit) && 11183 (MissingFeatures & ~FeatureBitset({Feature_IsThumb2Bit, 11184 Feature_IsThumbBit})).any()) 11185 break; 11186 if (isMClass() && MissingFeatures.test(Feature_HasNEONBit)) 11187 break; 11188 11189 NearMissMessage Message; 11190 Message.Loc = IDLoc; 11191 raw_svector_ostream OS(Message.Message); 11192 11193 OS << "instruction requires:"; 11194 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) 11195 if (MissingFeatures.test(i)) 11196 OS << ' ' << getSubtargetFeatureName(i); 11197 11198 NearMissesOut.emplace_back(Message); 11199 11200 break; 11201 } 11202 case NearMissInfo::NearMissPredicate: { 11203 NearMissMessage Message; 11204 Message.Loc = IDLoc; 11205 switch (I.getPredicateError()) { 11206 case Match_RequiresNotITBlock: 11207 Message.Message = "flag setting instruction only valid outside IT block"; 11208 break; 11209 case Match_RequiresITBlock: 11210 Message.Message = "instruction only valid inside IT block"; 11211 break; 11212 case Match_RequiresV6: 11213 Message.Message = "instruction variant requires ARMv6 or later"; 11214 break; 11215 case Match_RequiresThumb2: 11216 Message.Message = "instruction variant requires Thumb2"; 11217 break; 11218 case Match_RequiresV8: 11219 Message.Message = "instruction variant requires ARMv8 or later"; 11220 break; 11221 case Match_RequiresFlagSetting: 11222 Message.Message = "no flag-preserving variant of this instruction available"; 11223 break; 11224 case Match_InvalidOperand: 11225 Message.Message = "invalid operand for instruction"; 11226 break; 11227 default: 11228 llvm_unreachable("Unhandled target predicate error"); 11229 break; 11230 } 11231 NearMissesOut.emplace_back(Message); 11232 break; 11233 } 11234 case 
NearMissInfo::NearMissTooFewOperands: {
      // Only emit the "too few operands" note once per instruction.
      if (!ReportedTooFewOperands) {
        SMLoc EndLoc = ((ARMOperand &)*Operands.back()).getEndLoc();
        NearMissesOut.emplace_back(NearMissMessage{
            EndLoc, StringRef("too few operands for instruction")});
        ReportedTooFewOperands = true;
      }
      break;
    }
    case NearMissInfo::NoNearMiss:
      // This should never leave the matcher.
      llvm_unreachable("not a near-miss");
      break;
    }
  }
}

void ARMAsmParser::ReportNearMisses(SmallVectorImpl<NearMissInfo> &NearMisses,
                                    SMLoc IDLoc, OperandVector &Operands) {
  SmallVector<NearMissMessage, 4> Messages;
  FilterNearMisses(NearMisses, Messages, IDLoc, Operands);

  if (Messages.size() == 0) {
    // No near-misses were found, so the best we can do is "invalid
    // instruction".
    Error(IDLoc, "invalid instruction");
  } else if (Messages.size() == 1) {
    // One near miss was found, report it as the sole error.
    Error(Messages[0].Loc, Messages[0].Message);
  } else {
    // More than one near miss, so report a generic "invalid instruction"
    // error, followed by notes for each of the near-misses.
    Error(IDLoc, "invalid instruction, any one of the following would fix this:");
    for (auto &M : Messages) {
      Note(M.Loc, M.Message);
    }
  }
}

/// parseDirectiveArchExtension
///   ::= .arch_extension [no]feature
bool ARMAsmParser::parseDirectiveArchExtension(SMLoc L) {
  // FIXME: This structure should be moved inside ARMTargetParser
  // when we start to table-generate them, and we can use the ARM
  // flags below, that were generated by table-gen.
  static const struct {
    const unsigned Kind;
    const FeatureBitset ArchCheck;
    const FeatureBitset Features;
  } Extensions[] = {
    { ARM::AEK_CRC, {Feature_HasV8Bit}, {ARM::FeatureCRC} },
    { ARM::AEK_CRYPTO, {Feature_HasV8Bit},
      {ARM::FeatureCrypto, ARM::FeatureNEON, ARM::FeatureFPARMv8} },
    { ARM::AEK_FP, {Feature_HasV8Bit},
      {ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
    { (ARM::AEK_HWDIVTHUMB | ARM::AEK_HWDIVARM),
      {Feature_HasV7Bit, Feature_IsNotMClassBit},
      {ARM::FeatureHWDivThumb, ARM::FeatureHWDivARM} },
    { ARM::AEK_MP, {Feature_HasV7Bit, Feature_IsNotMClassBit},
      {ARM::FeatureMP} },
    { ARM::AEK_SIMD, {Feature_HasV8Bit},
      {ARM::FeatureNEON, ARM::FeatureVFP2_D16_SP, ARM::FeatureFPARMv8} },
    { ARM::AEK_SEC, {Feature_HasV6KBit}, {ARM::FeatureTrustZone} },
    // FIXME: Only available in A-class, isel not predicated
    { ARM::AEK_VIRT, {Feature_HasV7Bit}, {ARM::FeatureVirtualization} },
    { ARM::AEK_FP16, {Feature_HasV8_2aBit},
      {ARM::FeatureFPARMv8, ARM::FeatureFullFP16} },
    { ARM::AEK_RAS, {Feature_HasV8Bit}, {ARM::FeatureRAS} },
    { ARM::AEK_LOB, {Feature_HasV8_1MMainlineBit}, {ARM::FeatureLOB} },
    // FIXME: Unsupported extensions.
    { ARM::AEK_OS, {}, {} },
    { ARM::AEK_IWMMXT, {}, {} },
    { ARM::AEK_IWMMXT2, {}, {} },
    { ARM::AEK_MAVERICK, {}, {} },
    { ARM::AEK_XSCALE, {}, {} },
  };

  MCAsmParser &Parser = getParser();

  if (getLexer().isNot(AsmToken::Identifier))
    return Error(getLexer().getLoc(), "expected architecture extension name");

  StringRef Name = Parser.getTok().getString();
  SMLoc ExtLoc = Parser.getTok().getLoc();
  Lex();

  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.arch_extension' directive"))
    return true;

  // A leading "no" disables the extension instead of enabling it.
  bool EnableFeature = true;
  if (Name.startswith_lower("no")) {
    EnableFeature = false;
    Name = Name.substr(2);
  }
  unsigned FeatureKind = ARM::parseArchExt(Name);
  if (FeatureKind == ARM::AEK_INVALID)
    return Error(ExtLoc, "unknown architectural extension: " + Name);

  for (const auto &Extension : Extensions) {
    if (Extension.Kind != FeatureKind)
      continue;

    // An empty Features set marks an extension we recognise but cannot
    // toggle (see the "Unsupported extensions" rows above).
    if (Extension.Features.none())
      return Error(ExtLoc, "unsupported architectural extension: " + Name);

    // The extension is only legal on top of certain base architectures.
    if ((getAvailableFeatures() & Extension.ArchCheck) != Extension.ArchCheck)
      return Error(ExtLoc, "architectural extension '" + Name + "' is not "
                   "allowed for the current base architecture");

    MCSubtargetInfo &STI = copySTI();
    if (EnableFeature) {
      STI.SetFeatureBitsTransitively(Extension.Features);
    } else {
      STI.ClearFeatureBitsTransitively(Extension.Features);
    }
    // Recompute the matcher's available-feature mask for the new subtarget.
    FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
    setAvailableFeatures(Features);
    return false;
  }

  return Error(ExtLoc, "unknown architectural extension: " + Name);
}

// Define this matcher function after the auto-generated include so we
// have the match class enum definitions.
11361 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp, 11362 unsigned Kind) { 11363 ARMOperand &Op = static_cast<ARMOperand &>(AsmOp); 11364 // If the kind is a token for a literal immediate, check if our asm 11365 // operand matches. This is for InstAliases which have a fixed-value 11366 // immediate in the syntax. 11367 switch (Kind) { 11368 default: break; 11369 case MCK__35_0: 11370 if (Op.isImm()) 11371 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm())) 11372 if (CE->getValue() == 0) 11373 return Match_Success; 11374 break; 11375 case MCK__35_8: 11376 if (Op.isImm()) 11377 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm())) 11378 if (CE->getValue() == 8) 11379 return Match_Success; 11380 break; 11381 case MCK__35_16: 11382 if (Op.isImm()) 11383 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm())) 11384 if (CE->getValue() == 16) 11385 return Match_Success; 11386 break; 11387 case MCK_ModImm: 11388 if (Op.isImm()) { 11389 const MCExpr *SOExpr = Op.getImm(); 11390 int64_t Value; 11391 if (!SOExpr->evaluateAsAbsolute(Value)) 11392 return Match_Success; 11393 assert((Value >= std::numeric_limits<int32_t>::min() && 11394 Value <= std::numeric_limits<uint32_t>::max()) && 11395 "expression value must be representable in 32 bits"); 11396 } 11397 break; 11398 case MCK_rGPR: 11399 if (hasV8Ops() && Op.isReg() && Op.getReg() == ARM::SP) 11400 return Match_Success; 11401 return Match_rGPR; 11402 case MCK_GPRPair: 11403 if (Op.isReg() && 11404 MRI->getRegClass(ARM::GPRRegClassID).contains(Op.getReg())) 11405 return Match_Success; 11406 break; 11407 } 11408 return Match_InvalidOperand; 11409 } 11410 11411 bool ARMAsmParser::isMnemonicVPTPredicable(StringRef Mnemonic, 11412 StringRef ExtraToken) { 11413 if (!hasMVE()) 11414 return false; 11415 11416 return Mnemonic.startswith("vabav") || Mnemonic.startswith("vaddv") || 11417 Mnemonic.startswith("vaddlv") || Mnemonic.startswith("vminnmv") || 
11418 Mnemonic.startswith("vminnmav") || Mnemonic.startswith("vminv") || 11419 Mnemonic.startswith("vminav") || Mnemonic.startswith("vmaxnmv") || 11420 Mnemonic.startswith("vmaxnmav") || Mnemonic.startswith("vmaxv") || 11421 Mnemonic.startswith("vmaxav") || Mnemonic.startswith("vmladav") || 11422 Mnemonic.startswith("vrmlaldavh") || Mnemonic.startswith("vrmlalvh") || 11423 Mnemonic.startswith("vmlsdav") || Mnemonic.startswith("vmlav") || 11424 Mnemonic.startswith("vmlaldav") || Mnemonic.startswith("vmlalv") || 11425 Mnemonic.startswith("vmaxnm") || Mnemonic.startswith("vminnm") || 11426 Mnemonic.startswith("vmax") || Mnemonic.startswith("vmin") || 11427 Mnemonic.startswith("vshlc") || Mnemonic.startswith("vmovlt") || 11428 Mnemonic.startswith("vmovlb") || Mnemonic.startswith("vshll") || 11429 Mnemonic.startswith("vrshrn") || Mnemonic.startswith("vshrn") || 11430 Mnemonic.startswith("vqrshrun") || Mnemonic.startswith("vqshrun") || 11431 Mnemonic.startswith("vqrshrn") || Mnemonic.startswith("vqshrn") || 11432 Mnemonic.startswith("vbic") || Mnemonic.startswith("vrev64") || 11433 Mnemonic.startswith("vrev32") || Mnemonic.startswith("vrev16") || 11434 Mnemonic.startswith("vmvn") || Mnemonic.startswith("veor") || 11435 Mnemonic.startswith("vorn") || Mnemonic.startswith("vorr") || 11436 Mnemonic.startswith("vand") || Mnemonic.startswith("vmul") || 11437 Mnemonic.startswith("vqrdmulh") || Mnemonic.startswith("vqdmulh") || 11438 Mnemonic.startswith("vsub") || Mnemonic.startswith("vadd") || 11439 Mnemonic.startswith("vqsub") || Mnemonic.startswith("vqadd") || 11440 Mnemonic.startswith("vabd") || Mnemonic.startswith("vrhadd") || 11441 Mnemonic.startswith("vhsub") || Mnemonic.startswith("vhadd") || 11442 Mnemonic.startswith("vdup") || Mnemonic.startswith("vcls") || 11443 Mnemonic.startswith("vclz") || Mnemonic.startswith("vneg") || 11444 Mnemonic.startswith("vabs") || Mnemonic.startswith("vqneg") || 11445 Mnemonic.startswith("vqabs") || 11446 (Mnemonic.startswith("vrint") && 
Mnemonic != "vrintr") || 11447 Mnemonic.startswith("vcmla") || Mnemonic.startswith("vfma") || 11448 Mnemonic.startswith("vfms") || Mnemonic.startswith("vcadd") || 11449 Mnemonic.startswith("vadd") || Mnemonic.startswith("vsub") || 11450 Mnemonic.startswith("vshl") || Mnemonic.startswith("vqshl") || 11451 Mnemonic.startswith("vqrshl") || Mnemonic.startswith("vrshl") || 11452 Mnemonic.startswith("vsri") || Mnemonic.startswith("vsli") || 11453 Mnemonic.startswith("vrshr") || Mnemonic.startswith("vshr") || 11454 Mnemonic.startswith("vpsel") || Mnemonic.startswith("vcmp") || 11455 Mnemonic.startswith("vqdmladh") || Mnemonic.startswith("vqrdmladh") || 11456 Mnemonic.startswith("vqdmlsdh") || Mnemonic.startswith("vqrdmlsdh") || 11457 Mnemonic.startswith("vcmul") || Mnemonic.startswith("vrmulh") || 11458 Mnemonic.startswith("vqmovn") || Mnemonic.startswith("vqmovun") || 11459 Mnemonic.startswith("vmovnt") || Mnemonic.startswith("vmovnb") || 11460 Mnemonic.startswith("vmaxa") || Mnemonic.startswith("vmaxnma") || 11461 Mnemonic.startswith("vhcadd") || Mnemonic.startswith("vadc") || 11462 Mnemonic.startswith("vsbc") || Mnemonic.startswith("vrshr") || 11463 Mnemonic.startswith("vshr") || Mnemonic.startswith("vstrb") || 11464 Mnemonic.startswith("vldrb") || 11465 (Mnemonic.startswith("vstrh") && Mnemonic != "vstrhi") || 11466 (Mnemonic.startswith("vldrh") && Mnemonic != "vldrhi") || 11467 Mnemonic.startswith("vstrw") || Mnemonic.startswith("vldrw") || 11468 Mnemonic.startswith("vldrd") || Mnemonic.startswith("vstrd") || 11469 Mnemonic.startswith("vqdmull") || Mnemonic.startswith("vbrsr") || 11470 Mnemonic.startswith("vfmas") || Mnemonic.startswith("vmlas") || 11471 Mnemonic.startswith("vmla") || Mnemonic.startswith("vqdmlash") || 11472 Mnemonic.startswith("vqdmlah") || Mnemonic.startswith("vqrdmlash") || 11473 Mnemonic.startswith("vqrdmlah") || Mnemonic.startswith("viwdup") || 11474 Mnemonic.startswith("vdwdup") || Mnemonic.startswith("vidup") || 11475 
Mnemonic.startswith("vddup") || Mnemonic.startswith("vctp") || 11476 Mnemonic.startswith("vpnot") || Mnemonic.startswith("vbic") || 11477 Mnemonic.startswith("vrmlsldavh") || Mnemonic.startswith("vmlsldav") || 11478 Mnemonic.startswith("vcvt") || 11479 (Mnemonic.startswith("vmov") && 11480 !(ExtraToken == ".f16" || ExtraToken == ".32" || 11481 ExtraToken == ".16" || ExtraToken == ".8")); 11482 } 11483