//===-- PPCISelDAGToDAG.cpp - PPC --pattern matching inst selector --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a pattern matching instruction selector for PowerPC,
// converting from a legalized dag to a PPC dag.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/PPCMCTargetDesc.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCISelLowering.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <new>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "ppc-codegen"

STATISTIC(NumSextSetcc,
          "Number of (sext(setcc)) nodes expanded into GPR sequence.");
STATISTIC(NumZextSetcc,
          "Number of (zext(setcc)) nodes expanded into GPR sequence.");
STATISTIC(SignExtensionsAdded,
          "Number of sign extensions for compare inputs added.");
STATISTIC(ZeroExtensionsAdded,
          "Number of zero extensions for compare inputs added.");
STATISTIC(NumLogicOpsOnComparison,
          "Number of logical ops on i1 values calculated in GPR.");
STATISTIC(OmittedForNonExtendUses,
          "Number of compares not eliminated as they have non-extending uses.");
STATISTIC(NumP9Setb,
          "Number of compares lowered to setb.");

// FIXME: Remove this once the bug has been fixed!
cl::opt<bool> ANDIGlueBug("expose-ppc-andi-glue-bug",
                          cl::desc("expose the ANDI glue bug on PPC"),
                          cl::Hidden);

static cl::opt<bool>
    UseBitPermRewriter("ppc-use-bit-perm-rewriter", cl::init(true),
                       cl::desc("use aggressive ppc isel for bit permutations"),
                       cl::Hidden);
static cl::opt<bool> BPermRewriterNoMasking(
    "ppc-bit-perm-rewriter-stress-rotates",
    cl::desc("stress rotate selection in aggressive ppc isel for "
             "bit permutations"),
    cl::Hidden);

static cl::opt<bool> EnableBranchHint(
    "ppc-use-branch-hint", cl::init(true),
    cl::desc("Enable static hinting of branches on ppc"),
    cl::Hidden);

static cl::opt<bool> EnableTLSOpt(
    "ppc-tls-opt", cl::init(true),
    cl::desc("Enable tls optimization peephole"),
    cl::Hidden);

enum ICmpInGPRType { ICGPR_All, ICGPR_None, ICGPR_I32, ICGPR_I64,
                     ICGPR_NonExtIn, ICGPR_Zext, ICGPR_Sext, ICGPR_ZextI32,
                     ICGPR_SextI32, ICGPR_ZextI64, ICGPR_SextI64 };

static cl::opt<ICmpInGPRType> CmpInGPR(
    "ppc-gpr-icmps", cl::Hidden, cl::init(ICGPR_All),
    cl::desc("Specify the types of comparisons to emit GPR-only code for."),
    cl::values(clEnumValN(ICGPR_None, "none", "Do not modify integer comparisons."),
               clEnumValN(ICGPR_All, "all", "All possible int comparisons in GPRs."),
               clEnumValN(ICGPR_I32, "i32", "Only i32 comparisons in GPRs."),
               clEnumValN(ICGPR_I64, "i64", "Only i64 comparisons in GPRs."),
               clEnumValN(ICGPR_NonExtIn, "nonextin",
                          "Only comparisons where inputs don't need [sz]ext."),
               clEnumValN(ICGPR_Zext, "zext", "Only comparisons with zext result."),
               clEnumValN(ICGPR_ZextI32, "zexti32",
                          "Only i32 comparisons with zext result."),
               clEnumValN(ICGPR_ZextI64, "zexti64",
                          "Only i64 comparisons with zext result."),
               clEnumValN(ICGPR_Sext, "sext", "Only comparisons with sext result."),
               clEnumValN(ICGPR_SextI32, "sexti32",
                          "Only i32 comparisons with sext result."),
               clEnumValN(ICGPR_SextI64, "sexti64",
                          "Only i64 comparisons with sext result.")));
namespace {

//===--------------------------------------------------------------------===//
/// PPCDAGToDAGISel - PPC specific code to select PPC machine
/// instructions for SelectionDAG operations.
///
class PPCDAGToDAGISel : public SelectionDAGISel {
  const PPCTargetMachine &TM;
  const PPCSubtarget *Subtarget = nullptr;
  const PPCTargetLowering *PPCLowering = nullptr;
  unsigned GlobalBaseReg = 0;

public:
  explicit PPCDAGToDAGISel(PPCTargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel), TM(tm) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Make sure we re-emit a set of the global base reg if necessary
    GlobalBaseReg = 0;
    Subtarget = &MF.getSubtarget<PPCSubtarget>();
    PPCLowering = Subtarget->getTargetLowering();
    if (Subtarget->hasROPProtect()) {
      // Create a place on the stack for the ROP Protection Hash.
      // The ROP Protection Hash will always be 8 bytes and aligned to 8
      // bytes.
      MachineFrameInfo &MFI = MF.getFrameInfo();
      PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
      const int Result = MFI.CreateStackObject(8, Align(8), false);
      FI->setROPProtectionHashSaveIndex(Result);
    }
    SelectionDAGISel::runOnMachineFunction(MF);

    return true;
  }

  void PreprocessISelDAG() override;
  void PostprocessISelDAG() override;

  /// getI16Imm - Return a target constant with the specified value, of type
  /// i16.
  inline SDValue getI16Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i16);
  }

  /// getI32Imm - Return a target constant with the specified value, of type
  /// i32.
  inline SDValue getI32Imm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  }

  /// getI64Imm - Return a target constant with the specified value, of type
  /// i64.
  inline SDValue getI64Imm(uint64_t Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
  }

  /// getSmallIPtrImm - Return a target constant of pointer type.
  inline SDValue getSmallIPtrImm(unsigned Imm, const SDLoc &dl) {
    return CurDAG->getTargetConstant(
        Imm, dl, PPCLowering->getPointerTy(CurDAG->getDataLayout()));
  }

  /// isRotateAndMask - Returns true if Mask and Shift can be folded into a
  /// rotate and mask opcode and mask operation.
  static bool isRotateAndMask(SDNode *N, unsigned Mask, bool isShiftMask,
                              unsigned &SH, unsigned &MB, unsigned &ME);

  /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
  /// base register. Return the virtual register that holds this value.
  SDNode *getGlobalBaseReg();

  void selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset = 0);

  // Select - Convert the specified operand from a target-independent to a
  // target-specific node if it hasn't already been changed.
  void Select(SDNode *N) override;

  bool tryBitfieldInsert(SDNode *N);
  bool tryBitPermutation(SDNode *N);
  bool tryIntCompareInGPR(SDNode *N);

  // tryTLSXFormLoad - Convert an ISD::LOAD fed by a PPCISD::ADD_TLS into
  // an X-Form load instruction with the offset being a relocation coming from
  // the PPCISD::ADD_TLS.
  bool tryTLSXFormLoad(LoadSDNode *N);
  // tryTLSXFormStore - Convert an ISD::STORE fed by a PPCISD::ADD_TLS into
  // an X-Form store instruction with the offset being a relocation coming from
  // the PPCISD::ADD_TLS.
  bool tryTLSXFormStore(StoreSDNode *N);
  /// SelectCC - Select a comparison of the specified values with the
  /// specified condition code, returning the CR# of the expression.
  SDValue SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                   const SDLoc &dl, SDValue Chain = SDValue());

  /// SelectAddrImmOffs - Return true if the operand is valid for a preinc
  /// immediate field. Note that the operand at this point is already the
  /// result of a prior SelectAddressRegImm call.
  bool SelectAddrImmOffs(SDValue N, SDValue &Out) const {
    if (N.getOpcode() == ISD::TargetConstant ||
        N.getOpcode() == ISD::TargetGlobalAddress) {
      Out = N;
      return true;
    }

    return false;
  }

  /// SelectDSForm - Returns true if address N can be represented by the
  /// addressing mode of DSForm instructions (a base register, plus a signed
  /// 16-bit displacement that is a multiple of 4).
  bool SelectDSForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                              Align(4)) == PPC::AM_DSForm;
  }

  /// SelectDQForm - Returns true if address N can be represented by the
  /// addressing mode of DQForm instructions (a base register, plus a signed
  /// 16-bit displacement that is a multiple of 16).
  bool SelectDQForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                              Align(16)) == PPC::AM_DQForm;
  }

  /// SelectDForm - Returns true if address N can be represented by
  /// the addressing mode of DForm instructions (a base register, plus a
  /// signed 16-bit immediate).
  bool SelectDForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                              None) == PPC::AM_DForm;
  }

  /// SelectPCRelForm - Returns true if address N can be represented by
  /// PC-Relative addressing mode.
  bool SelectPCRelForm(SDNode *Parent, SDValue N, SDValue &Disp,
                       SDValue &Base) {
    return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                              None) == PPC::AM_PCRel;
  }

  /// SelectPDForm - Returns true if address N can be represented by Prefixed
  /// DForm addressing mode (a base register, plus a signed 34-bit immediate).
  bool SelectPDForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                              None) == PPC::AM_PrefixDForm;
  }

  /// SelectXForm - Returns true if address N can be represented by the
  /// addressing mode of XForm instructions (an indexed [r+r] operation).
  bool SelectXForm(SDNode *Parent, SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectOptimalAddrMode(Parent, N, Disp, Base, *CurDAG,
                                              None) == PPC::AM_XForm;
  }

  /// SelectForceXForm - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation (an XForm instruction).
  bool SelectForceXForm(SDNode *Parent, SDValue N, SDValue &Disp,
                        SDValue &Base) {
    return PPCLowering->SelectForceXFormMode(N, Disp, Base, *CurDAG) ==
           PPC::AM_XForm;
  }

  /// SelectAddrIdx - Given the specified address, check to see if it can be
  /// represented as an indexed [r+r] operation.
  /// This is for xform instructions whose associated displacement form is D.
  /// The last parameter \p 0 means the associated D form has no requirement
  /// for a 16-bit signed displacement.
  /// Returns false if it can be represented by [r+imm], which are preferred.
  bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, None);
  }

  /// SelectAddrIdxX4 - Given the specified address, check to see if it can be
  /// represented as an indexed [r+r] operation.
  /// This is for xform instructions whose associated displacement form is DS.
  /// The last parameter \p 4 means the associated DS form 16-bit signed
  /// displacement must be a multiple of 4.
  /// Returns false if it can be represented by [r+imm], which are preferred.
  bool SelectAddrIdxX4(SDValue N, SDValue &Base, SDValue &Index) {
    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
                                            Align(4));
  }

  /// SelectAddrIdxX16 - Given the specified address, check to see if it can be
  /// represented as an indexed [r+r] operation.
  /// This is for xform instructions whose associated displacement form is DQ.
  /// The last parameter \p 16 means the associated DQ form 16-bit signed
  /// displacement must be a multiple of 16.
  /// Returns false if it can be represented by [r+imm], which are preferred.
  bool SelectAddrIdxX16(SDValue N, SDValue &Base, SDValue &Index) {
    return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
                                            Align(16));
  }

  /// SelectAddrIdxOnly - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation.
  bool SelectAddrIdxOnly(SDValue N, SDValue &Base, SDValue &Index) {
    return PPCLowering->SelectAddressRegRegOnly(N, Base, Index, *CurDAG);
  }

  /// SelectAddrImm - Returns true if the address N can be represented by
  /// a base register plus a signed 16-bit displacement [r+imm].
  /// The last parameter \p 0 means the D form has no requirement for a 16-bit
  /// signed displacement.
  bool SelectAddrImm(SDValue N, SDValue &Disp,
                     SDValue &Base) {
    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, None);
  }

  /// SelectAddrImmX4 - Returns true if the address N can be represented by
  /// a base register plus a signed 16-bit displacement that is a multiple of
  /// 4 (last parameter). Suitable for use by STD and friends.
  bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, Align(4));
  }

  /// SelectAddrImmX16 - Returns true if the address N can be represented by
  /// a base register plus a signed 16-bit displacement that is a multiple of
  /// 16 (last parameter). Suitable for use by STXV and friends.
  bool SelectAddrImmX16(SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG,
                                            Align(16));
  }

  /// SelectAddrImmX34 - Returns true if the address N can be represented by
  /// a base register plus a signed 34-bit displacement. Suitable for use by
  /// PSTXVP and friends.
  bool SelectAddrImmX34(SDValue N, SDValue &Disp, SDValue &Base) {
    return PPCLowering->SelectAddressRegImm34(N, Disp, Base, *CurDAG);
  }

  // Select an address into a single register.
  bool SelectAddr(SDValue N, SDValue &Base) {
    Base = N;
    return true;
  }

  bool SelectAddrPCRel(SDValue N, SDValue &Base) {
    return PPCLowering->SelectAddressPCRel(N, Base);
  }

  /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
  /// inline asm expressions. It is always correct to compute the value into
  /// a register. The case of adding a (possibly relocatable) constant to a
  /// register can be improved, but it is wrong to substitute Reg+Reg for
  /// Reg in an asm, because the load or store opcode would have to change.
  bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                    unsigned ConstraintID,
                                    std::vector<SDValue> &OutOps) override {
    switch(ConstraintID) {
    default:
      errs() << "ConstraintID: " << ConstraintID << "\n";
      llvm_unreachable("Unexpected asm memory constraint");
    case InlineAsm::Constraint_es:
    case InlineAsm::Constraint_m:
    case InlineAsm::Constraint_o:
    case InlineAsm::Constraint_Q:
    case InlineAsm::Constraint_Z:
    case InlineAsm::Constraint_Zy:
      // We need to make sure that this one operand does not end up in r0
      // (because we might end up lowering this as 0(%op)).
      const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
      const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
      SDLoc dl(Op);
      SDValue RC = CurDAG->getTargetConstant(TRC->getID(), dl, MVT::i32);
      SDValue NewOp =
        SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                       dl, Op.getValueType(),
                                       Op, RC), 0);

      OutOps.push_back(NewOp);
      return false;
    }
    return true;
  }

  StringRef getPassName() const override {
    return "PowerPC DAG->DAG Pattern Instruction Selection";
  }

  // Include the pieces autogenerated from the target description.
#include "PPCGenDAGISel.inc"

private:
  bool trySETCC(SDNode *N);
  bool tryFoldSWTestBRCC(SDNode *N);
  bool tryAsSingleRLDICL(SDNode *N);
  bool tryAsSingleRLDICR(SDNode *N);
  bool tryAsSingleRLWINM(SDNode *N);
  bool tryAsSingleRLWINM8(SDNode *N);
  bool tryAsSingleRLWIMI(SDNode *N);
  bool tryAsPairOfRLDICL(SDNode *N);
  bool tryAsSingleRLDIMI(SDNode *N);

  void PeepholePPC64();
  void PeepholePPC64ZExt();
  void PeepholeCROps();

  SDValue combineToCMPB(SDNode *N);
  void foldBoolExts(SDValue &Res, SDNode *&N);

  bool AllUsersSelectZero(SDNode *N);
  void SwapAllSelectUsers(SDNode *N);

  bool isOffsetMultipleOf(SDNode *N, unsigned Val) const;
  void transferMemOperands(SDNode *N, SDNode *Result);
};

} // end anonymous namespace

/// getGlobalBaseReg - Output the instructions required to put the
/// base address to use for accessing globals into a register.
///
SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
  if (!GlobalBaseReg) {
    const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
    // Insert the set of GlobalBaseReg into the first MBB of the function
    MachineBasicBlock &FirstMBB = MF->front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    const Module *M = MF->getFunction().getParent();
    DebugLoc dl;

    if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) == MVT::i32) {
      if (Subtarget->isTargetELF()) {
        GlobalBaseReg = PPC::R30;
        if (!Subtarget->isSecurePlt() &&
            M->getPICLevel() == PICLevel::SmallPIC) {
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MoveGOTtoLR));
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
          MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
        } else {
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
          BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
          Register TempReg = RegInfo->createVirtualRegister(&PPC::GPRCRegClass);
          BuildMI(FirstMBB, MBBI, dl,
                  TII.get(PPC::UpdateGBR), GlobalBaseReg)
              .addReg(TempReg, RegState::Define).addReg(GlobalBaseReg);
          MF->getInfo<PPCFunctionInfo>()->setUsesPICBase(true);
        }
      } else {
        GlobalBaseReg =
            RegInfo->createVirtualRegister(&PPC::GPRC_and_GPRC_NOR0RegClass);
        BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR));
        BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR), GlobalBaseReg);
      }
    } else {
      // We must ensure that this sequence is dominated by the prologue.
      // FIXME: This is a bit of a big hammer since we don't get the benefits
      // of shrink-wrapping whenever we emit this instruction. Considering
      // this is used in any function where we emit a jump table, this may be
      // a significant limitation. We should consider inserting this in the
      // block where it is used and then commoning this sequence up if it
      // appears in multiple places.
      // Note: on ISA 3.0 cores, we can use lnia (addpcis) instead of
      // MovePCtoLR8.
      MF->getInfo<PPCFunctionInfo>()->setShrinkWrapDisabled(true);
      GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
      BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MovePCtoLR8));
      BuildMI(FirstMBB, MBBI, dl, TII.get(PPC::MFLR8), GlobalBaseReg);
    }
  }
  return CurDAG->getRegister(GlobalBaseReg,
                             PPCLowering->getPointerTy(CurDAG->getDataLayout()))
      .getNode();
}

// Check if a SDValue has the toc-data attribute.
static bool hasTocDataAttr(SDValue Val, unsigned PointerSize) {
  GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Val);
  if (!GA)
    return false;

  const GlobalVariable *GV = dyn_cast_or_null<GlobalVariable>(GA->getGlobal());
  if (!GV)
    return false;

  if (!GV->hasAttribute("toc-data"))
    return false;

  // TODO: These asserts should be updated as more support for the toc data
  // transformation is added (64 bit, struct support, etc.).

  assert(PointerSize == 4 && "Only 32 Bit Codegen is currently supported by "
                             "the toc data transformation.");

  assert(PointerSize >= GV->getAlign().valueOrOne().value() &&
         "GlobalVariables with an alignment requirement stricter than 4 bytes "
         "are not supported by the toc data transformation.");

  Type *GVType = GV->getValueType();

  assert(GVType->isSized() && "A GlobalVariable's size must be known to be "
                              "supported by the toc data transformation.");

  if (GVType->isVectorTy())
    report_fatal_error("A GlobalVariable of Vector type is not currently "
                       "supported by the toc data transformation.");

  if (GVType->isArrayTy())
    report_fatal_error("A GlobalVariable of Array type is not currently "
                       "supported by the toc data transformation.");

  if (GVType->isStructTy())
    report_fatal_error("A GlobalVariable of Struct type is not currently "
                       "supported by the toc data transformation.");

  assert(GVType->getPrimitiveSizeInBits() <= PointerSize * 8 &&
         "A GlobalVariable with size larger than 32 bits is not currently "
         "supported by the toc data transformation.");

  if (GV->hasLocalLinkage() || GV->hasPrivateLinkage())
    report_fatal_error("A GlobalVariable with private or local linkage is not "
                       "currently supported by the toc data transformation.");

  assert(!GV->hasCommonLinkage() &&
         "Tentative definitions cannot have the mapping class XMC_TD.");

  return true;
}

/// isInt32Immediate - This method tests to see if the node is a 32-bit constant
/// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDNode *N, unsigned &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i32) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

/// isInt64Immediate - This method tests to see if the node is a 64-bit constant
/// operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDNode *N, uint64_t &Imm) {
  if (N->getOpcode() == ISD::Constant && N->getValueType(0) == MVT::i64) {
    Imm = cast<ConstantSDNode>(N)->getZExtValue();
    return true;
  }
  return false;
}

// isInt32Immediate - This method tests to see if the value is a 32-bit constant
// operand. If so Imm will receive the 32-bit value.
static bool isInt32Immediate(SDValue N, unsigned &Imm) {
  return isInt32Immediate(N.getNode(), Imm);
}

/// isInt64Immediate - This method tests to see if the value is a 64-bit
/// constant operand. If so Imm will receive the 64-bit value.
static bool isInt64Immediate(SDValue N, uint64_t &Imm) {
  return isInt64Immediate(N.getNode(), Imm);
}

static unsigned getBranchHint(unsigned PCC,
                              const FunctionLoweringInfo &FuncInfo,
                              const SDValue &DestMBB) {
  assert(isa<BasicBlockSDNode>(DestMBB));

  if (!FuncInfo.BPI) return PPC::BR_NO_HINT;

  const BasicBlock *BB = FuncInfo.MBB->getBasicBlock();
  const Instruction *BBTerm = BB->getTerminator();

  if (BBTerm->getNumSuccessors() != 2) return PPC::BR_NO_HINT;

  const BasicBlock *TBB = BBTerm->getSuccessor(0);
  const BasicBlock *FBB = BBTerm->getSuccessor(1);

  auto TProb = FuncInfo.BPI->getEdgeProbability(BB, TBB);
  auto FProb = FuncInfo.BPI->getEdgeProbability(BB, FBB);

  // We only want to handle cases which are easy to predict at static time, e.g.
  // a C++ throw statement, which is very likely not taken, or a call to a
  // function that never returns, e.g. stdlib exit(). So we set Threshold to
  // filter out the unwanted cases.
  //
  // Below is the LLVM branch weight table; we only want to handle cases 1
  // and 2.
  //
  // Case                  Taken:Nontaken  Example
  // 1. Unreachable        1048575:1       C++ throw, stdlib exit(),
  // 2. Invoke-terminating 1:1048575
  // 3. Coldblock          4:64            __builtin_expect
  // 4. Loop Branch        124:4           For loop
  // 5. PH/ZH/FPH          20:12
  const uint32_t Threshold = 10000;

  if (std::max(TProb, FProb) / Threshold < std::min(TProb, FProb))
    return PPC::BR_NO_HINT;

  LLVM_DEBUG(dbgs() << "Use branch hint for '" << FuncInfo.Fn->getName()
                    << "::" << BB->getName() << "'\n"
                    << " -> " << TBB->getName() << ": " << TProb << "\n"
                    << " -> " << FBB->getName() << ": " << FProb << "\n");

  const BasicBlockSDNode *BBDN = cast<BasicBlockSDNode>(DestMBB);

  // If the destination BasicBlock is the False-BasicBlock (FBB), swap the
  // branch probabilities, because we want 'TProb' to stand for the branch
  // probability to the destination BasicBlock.
  if (BBDN->getBasicBlock()->getBasicBlock() != TBB)
    std::swap(TProb, FProb);

  return (TProb > FProb) ? PPC::BR_TAKEN_HINT : PPC::BR_NONTAKEN_HINT;
}

// isOpcWithIntImmediate - This method tests to see if the node is a specific
// opcode and that it has an immediate integer right operand.
// If so Imm will receive the 32-bit value.
static bool isOpcWithIntImmediate(SDNode *N, unsigned Opc, unsigned& Imm) {
  return N->getOpcode() == Opc
         && isInt32Immediate(N->getOperand(1).getNode(), Imm);
}

void PPCDAGToDAGISel::selectFrameIndex(SDNode *SN, SDNode *N, unsigned Offset) {
  SDLoc dl(SN);
  int FI = cast<FrameIndexSDNode>(N)->getIndex();
  SDValue TFI = CurDAG->getTargetFrameIndex(FI, N->getValueType(0));
  unsigned Opc = N->getValueType(0) == MVT::i32 ? PPC::ADDI : PPC::ADDI8;
  if (SN->hasOneUse())
    CurDAG->SelectNodeTo(SN, Opc, N->getValueType(0), TFI,
                         getSmallIPtrImm(Offset, dl));
  else
    ReplaceNode(SN, CurDAG->getMachineNode(Opc, dl, N->getValueType(0), TFI,
                                           getSmallIPtrImm(Offset, dl)));
}

bool PPCDAGToDAGISel::isRotateAndMask(SDNode *N, unsigned Mask,
                                      bool isShiftMask, unsigned &SH,
                                      unsigned &MB, unsigned &ME) {
  // Don't even go down this path for i64, since different logic will be
  // necessary for rldicl/rldicr/rldimi.
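  //
  // Illustrative example (not from the original source): for N = (srl x, 16)
  // with Mask = 0x0000FFFF and isShiftMask = false, the shift invalidates the
  // upper halfword, so Indeterminant = 0xFFFF0000 and the equivalent left
  // rotate amount is 32 - 16 = 16. The mask does not touch the indeterminate
  // bits, so this returns SH = 16, MB = 16, ME = 31, i.e. an
  // rlwinm x, 16, 16, 31.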
  if (N->getValueType(0) != MVT::i32)
    return false;

  unsigned Shift = 32;
  unsigned Indeterminant = ~0;  // bit mask marking indeterminant results
  unsigned Opcode = N->getOpcode();
  if (N->getNumOperands() != 2 ||
      !isInt32Immediate(N->getOperand(1).getNode(), Shift) || (Shift > 31))
    return false;

  if (Opcode == ISD::SHL) {
    // apply shift left to mask if it comes first
    if (isShiftMask) Mask = Mask << Shift;
    // determine which bits are made indeterminant by shift
    Indeterminant = ~(0xFFFFFFFFu << Shift);
  } else if (Opcode == ISD::SRL) {
    // apply shift right to mask if it comes first
    if (isShiftMask) Mask = Mask >> Shift;
    // determine which bits are made indeterminant by shift
    Indeterminant = ~(0xFFFFFFFFu >> Shift);
    // adjust for the left rotate
    Shift = 32 - Shift;
  } else if (Opcode == ISD::ROTL) {
    Indeterminant = 0;
  } else {
    return false;
  }

  // if the mask doesn't intersect any Indeterminant bits
  if (Mask && !(Mask & Indeterminant)) {
    SH = Shift & 31;
    // make sure the mask is still a mask (wrap arounds may not be)
    return isRunOfOnes(Mask, MB, ME);
  }
  return false;
}

bool PPCDAGToDAGISel::tryTLSXFormStore(StoreSDNode *ST) {
  SDValue Base = ST->getBasePtr();
  if (Base.getOpcode() != PPCISD::ADD_TLS)
    return false;
  SDValue Offset = ST->getOffset();
  if (!Offset.isUndef())
    return false;
  if (Base.getOperand(1).getOpcode() == PPCISD::TLS_LOCAL_EXEC_MAT_ADDR)
    return false;

  SDLoc dl(ST);
  EVT MemVT = ST->getMemoryVT();
  EVT RegVT = ST->getValue().getValueType();

  unsigned Opcode;
  switch (MemVT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8: {
    Opcode = (RegVT == MVT::i32) ? PPC::STBXTLS_32 : PPC::STBXTLS;
    break;
  }
  case MVT::i16: {
    Opcode = (RegVT == MVT::i32) ? PPC::STHXTLS_32 : PPC::STHXTLS;
    break;
  }
  case MVT::i32: {
    Opcode = (RegVT == MVT::i32) ? PPC::STWXTLS_32 : PPC::STWXTLS;
    break;
  }
  case MVT::i64: {
    Opcode = PPC::STDXTLS;
    break;
  }
  }
  SDValue Chain = ST->getChain();
  SDVTList VTs = ST->getVTList();
  SDValue Ops[] = {ST->getValue(), Base.getOperand(0), Base.getOperand(1),
                   Chain};
  SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
  transferMemOperands(ST, MN);
  ReplaceNode(ST, MN);
  return true;
}

bool PPCDAGToDAGISel::tryTLSXFormLoad(LoadSDNode *LD) {
  SDValue Base = LD->getBasePtr();
  if (Base.getOpcode() != PPCISD::ADD_TLS)
    return false;
  SDValue Offset = LD->getOffset();
  if (!Offset.isUndef())
    return false;
  if (Base.getOperand(1).getOpcode() == PPCISD::TLS_LOCAL_EXEC_MAT_ADDR)
    return false;

  SDLoc dl(LD);
  EVT MemVT = LD->getMemoryVT();
  EVT RegVT = LD->getValueType(0);
  unsigned Opcode;
  switch (MemVT.getSimpleVT().SimpleTy) {
  default:
    return false;
  case MVT::i8: {
    Opcode = (RegVT == MVT::i32) ? PPC::LBZXTLS_32 : PPC::LBZXTLS;
    break;
  }
  case MVT::i16: {
    Opcode = (RegVT == MVT::i32) ? PPC::LHZXTLS_32 : PPC::LHZXTLS;
    break;
  }
  case MVT::i32: {
    Opcode = (RegVT == MVT::i32) ? PPC::LWZXTLS_32 : PPC::LWZXTLS;
    break;
  }
  case MVT::i64: {
    Opcode = PPC::LDXTLS;
    break;
  }
  }
  SDValue Chain = LD->getChain();
  SDVTList VTs = LD->getVTList();
  SDValue Ops[] = {Base.getOperand(0), Base.getOperand(1), Chain};
  SDNode *MN = CurDAG->getMachineNode(Opcode, dl, VTs, Ops);
  transferMemOperands(LD, MN);
  ReplaceNode(LD, MN);
  return true;
}

/// Turn an or of two masked values into the rotate left word immediate then
/// mask insert (rlwimi) instruction.
bool PPCDAGToDAGISel::tryBitfieldInsert(SDNode *N) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDLoc dl(N);

  KnownBits LKnown = CurDAG->computeKnownBits(Op0);
  KnownBits RKnown = CurDAG->computeKnownBits(Op1);

  unsigned TargetMask = LKnown.Zero.getZExtValue();
  unsigned InsertMask = RKnown.Zero.getZExtValue();

  if ((TargetMask | InsertMask) == 0xFFFFFFFF) {
    unsigned Op0Opc = Op0.getOpcode();
    unsigned Op1Opc = Op1.getOpcode();
    unsigned Value, SH = 0;
    TargetMask = ~TargetMask;
    InsertMask = ~InsertMask;

    // If the LHS has a foldable shift and the RHS does not, then swap it to the
    // RHS so that we can fold the shift into the insert.
    if (Op0Opc == ISD::AND && Op1Opc == ISD::AND) {
      if (Op0.getOperand(0).getOpcode() == ISD::SHL ||
          Op0.getOperand(0).getOpcode() == ISD::SRL) {
        if (Op1.getOperand(0).getOpcode() != ISD::SHL &&
            Op1.getOperand(0).getOpcode() != ISD::SRL) {
          std::swap(Op0, Op1);
          std::swap(Op0Opc, Op1Opc);
          std::swap(TargetMask, InsertMask);
        }
      }
    } else if (Op0Opc == ISD::SHL || Op0Opc == ISD::SRL) {
      if (Op1Opc == ISD::AND && Op1.getOperand(0).getOpcode() != ISD::SHL &&
          Op1.getOperand(0).getOpcode() != ISD::SRL) {
        std::swap(Op0, Op1);
        std::swap(Op0Opc, Op1Opc);
        std::swap(TargetMask, InsertMask);
      }
    }

    unsigned MB, ME;
    if (isRunOfOnes(InsertMask, MB, ME)) {
      if ((Op1Opc == ISD::SHL || Op1Opc == ISD::SRL) &&
          isInt32Immediate(Op1.getOperand(1), Value)) {
        Op1 = Op1.getOperand(0);
        SH = (Op1Opc == ISD::SHL) ? Value : 32 - Value;
      }
      if (Op1Opc == ISD::AND) {
        // The AND mask might not be a constant, and we need to make sure that
        // if we're going to fold the masking with the insert, all bits not
        // known to be zero in the mask are known to be one.
        KnownBits MKnown = CurDAG->computeKnownBits(Op1.getOperand(1));
        bool CanFoldMask = InsertMask == MKnown.One.getZExtValue();

        unsigned SHOpc = Op1.getOperand(0).getOpcode();
        if ((SHOpc == ISD::SHL || SHOpc == ISD::SRL) && CanFoldMask &&
            isInt32Immediate(Op1.getOperand(0).getOperand(1), Value)) {
          // Note that Value must be in range here (less than 32) because
          // otherwise there would not be any bits set in InsertMask.
          Op1 = Op1.getOperand(0).getOperand(0);
          SH = (SHOpc == ISD::SHL) ? Value : 32 - Value;
        }
      }

      SH &= 31;
      SDValue Ops[] = { Op0, Op1, getI32Imm(SH, dl), getI32Imm(MB, dl),
                        getI32Imm(ME, dl) };
      ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops));
      return true;
    }
  }
  return false;
}

static unsigned allUsesTruncate(SelectionDAG *CurDAG, SDNode *N) {
  unsigned MaxTruncation = 0;
  // Cannot use range-based for loop here as we need the actual use (i.e. we
  // need the operand number corresponding to the use). A range-based for
  // will unbox the use and provide an SDNode*.
  for (SDNode::use_iterator Use = N->use_begin(), UseEnd = N->use_end();
       Use != UseEnd; ++Use) {
    unsigned Opc =
        Use->isMachineOpcode() ? Use->getMachineOpcode() : Use->getOpcode();
    switch (Opc) {
    default: return 0;
    case ISD::TRUNCATE:
      if (Use->isMachineOpcode())
        return 0;
      MaxTruncation =
          std::max(MaxTruncation, (unsigned)Use->getValueType(0).getSizeInBits());
      continue;
    case ISD::STORE: {
      if (Use->isMachineOpcode())
        return 0;
      StoreSDNode *STN = cast<StoreSDNode>(*Use);
      unsigned MemVTSize = STN->getMemoryVT().getSizeInBits();
      if (MemVTSize == 64 || Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, MemVTSize);
      continue;
    }
    case PPC::STW8:
    case PPC::STWX8:
    case PPC::STWU8:
    case PPC::STWUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 32u);
      continue;
    case PPC::STH8:
    case PPC::STHX8:
    case PPC::STHU8:
    case PPC::STHUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 16u);
      continue;
    case PPC::STB8:
    case PPC::STBX8:
    case PPC::STBU8:
    case PPC::STBUX8:
      if (Use.getOperandNo() != 0)
        return 0;
      MaxTruncation = std::max(MaxTruncation, 8u);
      continue;
    }
  }
  return MaxTruncation;
}

// For any 32 < Num < 64, check if the Imm contains at least Num consecutive
// zeros and, if so, return the rotate-right amount that moves that run of
// zeros to the most significant bits; return 0 otherwise.
static int findContiguousZerosAtLeast(uint64_t Imm, unsigned Num) {
  unsigned HiTZ = countTrailingZeros<uint32_t>(Hi_32(Imm));
  unsigned LoLZ = countLeadingZeros<uint32_t>(Lo_32(Imm));
  if ((HiTZ + LoLZ) >= Num)
    return (32 + HiTZ);
  return 0;
}

// Direct materialization of 64-bit constants by enumerated patterns.
static SDNode *selectI64ImmDirect(SelectionDAG *CurDAG, const SDLoc &dl,
                                  uint64_t Imm, unsigned &InstCnt) {
  unsigned TZ = countTrailingZeros<uint64_t>(Imm);
  unsigned LZ = countLeadingZeros<uint64_t>(Imm);
  unsigned TO = countTrailingOnes<uint64_t>(Imm);
  unsigned LO = countLeadingOnes<uint64_t>(Imm);
  unsigned Hi32 = Hi_32(Imm);
  unsigned Lo32 = Lo_32(Imm);
  SDNode *Result = nullptr;
  unsigned Shift = 0;

  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };

  // Following patterns use 1 instruction to materialize the Imm.
  InstCnt = 1;
  // 1-1) Patterns : {zeros}{15-bit value}
  //                 {ones}{15-bit value}
  if (isInt<16>(Imm)) {
    SDValue SDImm = CurDAG->getTargetConstant(Imm, dl, MVT::i64);
    return CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
  }
  // 1-2) Patterns : {zeros}{15-bit value}{16 zeros}
  //                 {ones}{15-bit value}{16 zeros}
  if (TZ > 15 && (LZ > 32 || LO > 32))
    return CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64,
                                  getI32Imm((Imm >> 16) & 0xffff));

  // Following patterns use 2 instructions to materialize the Imm.
  InstCnt = 2;
  assert(LZ < 64 && "Unexpected leading zeros here.");
  // Count of ones following the leading zeros.
  unsigned FO = countLeadingOnes<uint64_t>(Imm << LZ);
  // 2-1) Patterns : {zeros}{31-bit value}
  //                 {ones}{31-bit value}
  if (isInt<32>(Imm)) {
    uint64_t ImmHi16 = (Imm >> 16) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    return CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Imm & 0xffff));
  }
  // 2-2) Patterns : {zeros}{ones}{15-bit value}{zeros}
  //                 {zeros}{15-bit value}{zeros}
  //                 {zeros}{ones}{15-bit value}
  //                 {ones}{15-bit value}{zeros}
  // We can take advantage of LI's sign-extension semantics to generate leading
  // ones, and then use RLDIC to mask off the ones in both sides after rotation.
  if ((LZ + FO + TZ) > 48) {
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm((Imm >> TZ) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TZ), getI32Imm(LZ));
  }
  // 2-3) Pattern : {zeros}{15-bit value}{ones}
  // Shift right the Imm by (48 - LZ) bits to construct a negative 16-bit
  // value, therefore we can take advantage of LI's sign-extension semantics,
  // and then mask them off after rotation.
  //
  // +--LZ--||-15-bit-||--TO--+     +-------------|--16-bit--+
  // |00000001bbbbbbbbb1111111| ->  |00000000000001bbbbbbbbb1|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  //          Imm                   (Imm >> (48 - LZ) & 0xffff)
  // +----sext-----|--16-bit--+     +clear-|-----------------+
  // |11111111111111bbbbbbbbb1| ->  |00000001bbbbbbbbb1111111|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  // LI8: sext many leading zeros   RLDICL: rotate left (48 - LZ), clear left LZ
  if ((LZ + TO) > 48) {
    // Since the immediates with (LZ > 32) have been handled by previous
    // patterns, here we have (LZ <= 32) to make sure we will not shift right
    // the Imm by a negative value.
    assert(LZ <= 32 && "Unexpected shift value.");
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm((Imm >> (48 - LZ) & 0xffff)));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(48 - LZ), getI32Imm(LZ));
  }
  // 2-4) Patterns : {zeros}{ones}{15-bit value}{ones}
  //                 {ones}{15-bit value}{ones}
  // We can take advantage of LI's sign-extension semantics to generate leading
  // ones, and then use RLDICL to mask off the ones in left sides (if required)
  // after rotation.
  //
  // +-LZ-FO||-15-bit-||--TO--+     +-------------|--16-bit--+
  // |00011110bbbbbbbbb1111111| ->  |000000000011110bbbbbbbbb|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  //          Imm                      (Imm >> TO) & 0xffff
  // +----sext-----|--16-bit--+     +LZ|---------------------+
  // |111111111111110bbbbbbbbb| ->  |00011110bbbbbbbbb1111111|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  // LI8: sext many leading zeros   RLDICL: rotate left TO, clear left LZ
  if ((LZ + FO + TO) > 48) {
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm((Imm >> TO) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TO), getI32Imm(LZ));
  }
  // 2-5) Pattern : {32 zeros}{****}{0}{15-bit value}
  // If Hi32 is zero and the Lo16 (in Lo32) can be represented as a positive
  // 16-bit value, we can use LI for the Lo16 without generating leading ones,
  // and then add the Hi16 (in Lo32) with ORIS.
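  //
  // A hypothetical example (not from the original comments): for
  // Imm = 0x0000000089AB1234 we have LZ == 32 and bit 15 of Lo32 clear, so
  // LI8 0x1234 followed by ORIS8 0x89AB materializes the value in two
  // instructions.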
  if (LZ == 32 && ((Lo32 & 0x8000) == 0)) {
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm(Lo32 & 0xffff));
    return CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Lo32 >> 16));
  }
  // 2-6) Patterns : {******}{49 zeros}{******}
  //                 {******}{49 ones}{******}
  // If the Imm contains 49 consecutive zeros/ones, it means that a total of 15
  // bits remain on both sides. Rotate right the Imm to construct an int<16>
  // value, use LI for the int<16> value and then use RLDICL without a mask to
  // rotate it back.
  //
  // 1) findContiguousZerosAtLeast(Imm, 49)
  // +------|--zeros-|------+     +---ones--||---15 bit--+
  // |bbbbbb0000000000aaaaaa| ->  |0000000000aaaaaabbbbbb|
  // +----------------------+     +----------------------+
  // 63                    0      63                    0
  //
  // 2) findContiguousZerosAtLeast(~Imm, 49)
  // +------|--ones--|------+     +---ones--||---15 bit--+
  // |bbbbbb1111111111aaaaaa| ->  |1111111111aaaaaabbbbbb|
  // +----------------------+     +----------------------+
  // 63                    0      63                    0
  if ((Shift = findContiguousZerosAtLeast(Imm, 49)) ||
      (Shift = findContiguousZerosAtLeast(~Imm, 49))) {
    uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
    Result = CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64,
                                    getI32Imm(RotImm & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Shift), getI32Imm(0));
  }

  // Following patterns use 3 instructions to materialize the Imm.
  InstCnt = 3;
  // 3-1) Patterns : {zeros}{ones}{31-bit value}{zeros}
  //                 {zeros}{31-bit value}{zeros}
  //                 {zeros}{ones}{31-bit value}
  //                 {ones}{31-bit value}{zeros}
  // We can take advantage of LIS's sign-extension semantics to generate leading
  // ones, add the remaining bits with ORI, and then use RLDIC to mask off the
  // ones in both sides after rotation.
  if ((LZ + FO + TZ) > 32) {
    uint64_t ImmHi16 = (Imm >> (TZ + 16)) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm((Imm >> TZ) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TZ), getI32Imm(LZ));
  }
  // 3-2) Pattern : {zeros}{31-bit value}{ones}
  // Shift right the Imm by (32 - LZ) bits to construct a negative 32-bit
  // value, therefore we can take advantage of LIS's sign-extension semantics,
  // add the remaining bits with ORI, and then mask them off after rotation.
  // This is similar to Pattern 2-3, please refer to the diagram there.
  if ((LZ + TO) > 32) {
    // Since the immediates with (LZ > 32) have been handled by previous
    // patterns, here we have (LZ <= 32) to make sure we will not shift right
    // the Imm by a negative value.
    assert(LZ <= 32 && "Unexpected shift value.");
    Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64,
                                    getI32Imm((Imm >> (48 - LZ)) & 0xffff));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm((Imm >> (32 - LZ)) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(32 - LZ), getI32Imm(LZ));
  }
  // 3-3) Patterns : {zeros}{ones}{31-bit value}{ones}
  //                 {ones}{31-bit value}{ones}
  // We can take advantage of LIS's sign-extension semantics to generate leading
  // ones, add the remaining bits with ORI, and then use RLDICL to mask off the
  // ones in left sides (if required) after rotation.
  // This is similar to Pattern 2-4, please refer to the diagram there.
  if ((LZ + FO + TO) > 32) {
    Result = CurDAG->getMachineNode(PPC::LIS8, dl, MVT::i64,
                                    getI32Imm((Imm >> (TO + 16)) & 0xffff));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm((Imm >> TO) & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TO), getI32Imm(LZ));
  }
  // 3-4) Patterns : High word == Low word
  if (Hi32 == Lo32) {
    // Handle the first 32 bits.
    uint64_t ImmHi16 = (Lo32 >> 16) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm(Lo32 & 0xffff));
    // Use rldimi to insert the Low word into High word.
    SDValue Ops[] = {SDValue(Result, 0), SDValue(Result, 0), getI32Imm(32),
                     getI32Imm(0)};
    return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
  }
  // 3-5) Patterns : {******}{33 zeros}{******}
  //                 {******}{33 ones}{******}
  // If the Imm contains 33 consecutive zeros/ones, it means that a total of 31
  // bits remain on both sides. Rotate right the Imm to construct an int<32>
  // value, use LIS + ORI for the int<32> value and then use RLDICL without a
  // mask to rotate it back.
  // This is similar to Pattern 2-6, please refer to the diagram there.
  if ((Shift = findContiguousZerosAtLeast(Imm, 33)) ||
      (Shift = findContiguousZerosAtLeast(~Imm, 33))) {
    uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
    uint64_t ImmHi16 = (RotImm >> 16) & 0xffff;
    unsigned Opcode = ImmHi16 ? PPC::LIS8 : PPC::LI8;
    Result = CurDAG->getMachineNode(Opcode, dl, MVT::i64, getI32Imm(ImmHi16));
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm(RotImm & 0xffff));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(Shift), getI32Imm(0));
  }

  InstCnt = 0;
  return nullptr;
}

// Try to select instructions to generate a 64-bit immediate using prefixed as
// well as non-prefixed instructions. The function will return the SDNode
// to materialize that constant, or it will return nullptr if it does not
// find one. The variable InstCnt is set to the number of instructions that
// were selected.
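//
// A hypothetical example (not part of the original comment): a constant such
// as 0x1FEDCBA98 fits in a signed 34-bit immediate, so the prefixed path
// below materializes it with a single PLI8, whereas the non-prefixed path in
// selectI64ImmDirect would need up to three instructions.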
static SDNode *selectI64ImmDirectPrefix(SelectionDAG *CurDAG, const SDLoc &dl,
                                        uint64_t Imm, unsigned &InstCnt) {
  unsigned TZ = countTrailingZeros<uint64_t>(Imm);
  unsigned LZ = countLeadingZeros<uint64_t>(Imm);
  unsigned TO = countTrailingOnes<uint64_t>(Imm);
  unsigned FO = countLeadingOnes<uint64_t>(LZ == 64 ? 0 : (Imm << LZ));
  unsigned Hi32 = Hi_32(Imm);
  unsigned Lo32 = Lo_32(Imm);

  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };

  auto getI64Imm = [CurDAG, dl](uint64_t Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i64);
  };

  // Following patterns use 1 instruction to materialize Imm.
  InstCnt = 1;

  // The pli instruction can materialize up to 34 bits directly.
  // If a constant fits within 34-bits, emit the pli instruction here directly.
  if (isInt<34>(Imm))
    return CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                  CurDAG->getTargetConstant(Imm, dl, MVT::i64));

  // Require at least two instructions.
  InstCnt = 2;
  SDNode *Result = nullptr;
  // Patterns : {zeros}{ones}{33-bit value}{zeros}
  //            {zeros}{33-bit value}{zeros}
  //            {zeros}{ones}{33-bit value}
  //            {ones}{33-bit value}{zeros}
  // We can take advantage of PLI's sign-extension semantics to generate leading
  // ones, and then use RLDIC to mask off the ones on both sides after rotation.
  if ((LZ + FO + TZ) > 30) {
    APInt SignedInt34 = APInt(34, (Imm >> TZ) & 0x3ffffffff);
    APInt Extended = SignedInt34.sext(64);
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                    getI64Imm(*Extended.getRawData()));
    return CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TZ), getI32Imm(LZ));
  }
  // Pattern : {zeros}{33-bit value}{ones}
  // Shift right the Imm by (30 - LZ) bits to construct a negative 34 bit value,
  // therefore we can take advantage of PLI's sign-extension semantics, and then
  // mask them off after rotation.
  //
  // +--LZ--||-33-bit-||--TO--+     +-------------|--34-bit--+
  // |00000001bbbbbbbbb1111111| ->  |00000000000001bbbbbbbbb1|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  //
  // +----sext-----|--34-bit--+     +clear-|-----------------+
  // |11111111111111bbbbbbbbb1| ->  |00000001bbbbbbbbb1111111|
  // +------------------------+     +------------------------+
  // 63                      0      63                      0
  if ((LZ + TO) > 30) {
    APInt SignedInt34 = APInt(34, (Imm >> (30 - LZ)) & 0x3ffffffff);
    APInt Extended = SignedInt34.sext(64);
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                    getI64Imm(*Extended.getRawData()));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(30 - LZ), getI32Imm(LZ));
  }
  // Patterns : {zeros}{ones}{33-bit value}{ones}
  //            {ones}{33-bit value}{ones}
  // Similar to LI we can take advantage of PLI's sign-extension semantics to
  // generate leading ones, and then use RLDICL to mask off the ones in left
  // sides (if required) after rotation.
  if ((LZ + FO + TO) > 30) {
    APInt SignedInt34 = APInt(34, (Imm >> TO) & 0x3ffffffff);
    APInt Extended = SignedInt34.sext(64);
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64,
                                    getI64Imm(*Extended.getRawData()));
    return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, SDValue(Result, 0),
                                  getI32Imm(TO), getI32Imm(LZ));
  }
  // Patterns : {******}{31 zeros}{******}
  //          : {******}{31 ones}{******}
  // If Imm contains 31 consecutive zeros/ones then the remaining bit count
  // is 33. Rotate right the Imm to construct an int<33> value; we can use PLI
  // for the int<33> value and then use RLDICL without a mask to rotate it back.
  //
  // +------|--ones--|------+     +---ones--||---33 bit--+
  // |bbbbbb1111111111aaaaaa| ->  |1111111111aaaaaabbbbbb|
  // +----------------------+     +----------------------+
  // 63                    0      63                    0
  for (unsigned Shift = 0; Shift < 63; ++Shift) {
    uint64_t RotImm = APInt(64, Imm).rotr(Shift).getZExtValue();
    if (isInt<34>(RotImm)) {
      Result =
          CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(RotImm));
      return CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Shift),
                                    getI32Imm(0));
    }
  }

  // Patterns : High word == Low word
  // This is basically a splat of a 32 bit immediate.
  if (Hi32 == Lo32) {
    Result = CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(Hi32));
    SDValue Ops[] = {SDValue(Result, 0), SDValue(Result, 0), getI32Imm(32),
                     getI32Imm(0)};
    return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
  }

  InstCnt = 3;
  // Catch-all
  // This pattern can form any 64 bit immediate in 3 instructions.
  SDNode *ResultHi =
      CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(Hi32));
  SDNode *ResultLo =
      CurDAG->getMachineNode(PPC::PLI8, dl, MVT::i64, getI64Imm(Lo32));
  SDValue Ops[] = {SDValue(ResultLo, 0), SDValue(ResultHi, 0), getI32Imm(32),
                   getI32Imm(0)};
  return CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops);
}

static SDNode *selectI64Imm(SelectionDAG *CurDAG, const SDLoc &dl, uint64_t Imm,
                            unsigned *InstCnt = nullptr) {
  unsigned InstCntDirect = 0;
  // No more than 3 instructions are used if we can select the i64 immediate
  // directly.
  SDNode *Result = selectI64ImmDirect(CurDAG, dl, Imm, InstCntDirect);

  const PPCSubtarget &Subtarget =
      CurDAG->getMachineFunction().getSubtarget<PPCSubtarget>();

  // If we have prefixed instructions and there is a chance we can
  // materialize the constant with fewer prefixed instructions than
  // non-prefixed, try that.
  if (Subtarget.hasPrefixInstrs() && InstCntDirect != 1) {
    unsigned InstCntDirectP = 0;
    SDNode *ResultP = selectI64ImmDirectPrefix(CurDAG, dl, Imm, InstCntDirectP);
    // Use the prefix case in either of two cases:
    // 1) We have no result from the non-prefix case to use.
    // 2) The non-prefix case uses more instructions than the prefix case.
    // If the prefix and non-prefix cases use the same number of instructions
    // we will prefer the non-prefix case.
    if (ResultP && (!Result || InstCntDirectP < InstCntDirect)) {
      if (InstCnt)
        *InstCnt = InstCntDirectP;
      return ResultP;
    }
  }

  if (Result) {
    if (InstCnt)
      *InstCnt = InstCntDirect;
    return Result;
  }
  auto getI32Imm = [CurDAG, dl](unsigned Imm) {
    return CurDAG->getTargetConstant(Imm, dl, MVT::i32);
  };
  // Handle the upper 32 bit value.
  Result =
      selectI64ImmDirect(CurDAG, dl, Imm & 0xffffffff00000000, InstCntDirect);
  // Add in the last bits as required.
  if (uint32_t Hi16 = (Lo_32(Imm) >> 16) & 0xffff) {
    Result = CurDAG->getMachineNode(PPC::ORIS8, dl, MVT::i64,
                                    SDValue(Result, 0), getI32Imm(Hi16));
    ++InstCntDirect;
  }
  if (uint32_t Lo16 = Lo_32(Imm) & 0xffff) {
    Result = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, SDValue(Result, 0),
                                    getI32Imm(Lo16));
    ++InstCntDirect;
  }
  if (InstCnt)
    *InstCnt = InstCntDirect;
  return Result;
}

// Select a 64-bit constant.
static SDNode *selectI64Imm(SelectionDAG *CurDAG, SDNode *N) {
  SDLoc dl(N);

  // Get 64 bit value.
  int64_t Imm = cast<ConstantSDNode>(N)->getZExtValue();
  if (unsigned MinSize = allUsesTruncate(CurDAG, N)) {
    uint64_t SextImm = SignExtend64(Imm, MinSize);
    SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64);
    if (isInt<16>(SextImm))
      return CurDAG->getMachineNode(PPC::LI8, dl, MVT::i64, SDImm);
  }
  return selectI64Imm(CurDAG, dl, Imm);
}

namespace {

class BitPermutationSelector {
  struct ValueBit {
    SDValue V;

    // The bit number in the value, using a convention where bit 0 is the
    // lowest-order bit.
    unsigned Idx;

    // ConstZero means a bit we need to mask off.
    // Variable is a bit that comes from an input variable.
    // VariableKnownToBeZero is also a bit that comes from an input variable,
    // but it is known to be zero already, so we do not need to mask it.
    enum Kind {
      ConstZero,
      Variable,
      VariableKnownToBeZero
    } K;

    ValueBit(SDValue V, unsigned I, Kind K = Variable)
        : V(V), Idx(I), K(K) {}
    ValueBit(Kind K = Variable)
        : V(SDValue(nullptr, 0)), Idx(UINT32_MAX), K(K) {}

    bool isZero() const {
      return K == ConstZero || K == VariableKnownToBeZero;
    }

    bool hasValue() const {
      return K == Variable || K == VariableKnownToBeZero;
    }

    SDValue getValue() const {
      assert(hasValue() && "Cannot get the value of a constant bit");
      return V;
    }

    unsigned getValueBitIndex() const {
      assert(hasValue() && "Cannot get the value bit index of a constant bit");
      return Idx;
    }
  };

  // A bit group has the same underlying value and the same rotate factor.
  struct BitGroup {
    SDValue V;
    unsigned RLAmt;
    unsigned StartIdx, EndIdx;

    // This rotation amount assumes that the lower 32 bits of the quantity are
    // replicated in the high 32 bits by the rotation operator (which is done
    // by rlwinm and friends in 64-bit mode).
    bool Repl32;
    // Did converting to Repl32 == true change the rotation factor? If it did,
    // it decreased it by 32.
    bool Repl32CR;
    // Was this group coalesced after setting Repl32 to true?
1406 bool Repl32Coalesced; 1407 1408 BitGroup(SDValue V, unsigned R, unsigned S, unsigned E) 1409 : V(V), RLAmt(R), StartIdx(S), EndIdx(E), Repl32(false), Repl32CR(false), 1410 Repl32Coalesced(false) { 1411 LLVM_DEBUG(dbgs() << "\tbit group for " << V.getNode() << " RLAmt = " << R 1412 << " [" << S << ", " << E << "]\n"); 1413 } 1414 }; 1415 1416 // Information on each (Value, RLAmt) pair (like the number of groups 1417 // associated with each) used to choose the lowering method. 1418 struct ValueRotInfo { 1419 SDValue V; 1420 unsigned RLAmt = std::numeric_limits<unsigned>::max(); 1421 unsigned NumGroups = 0; 1422 unsigned FirstGroupStartIdx = std::numeric_limits<unsigned>::max(); 1423 bool Repl32 = false; 1424 1425 ValueRotInfo() = default; 1426 1427 // For sorting (in reverse order) by NumGroups, and then by 1428 // FirstGroupStartIdx. 1429 bool operator < (const ValueRotInfo &Other) const { 1430 // We need to sort so that the non-Repl32 come first because, when we're 1431 // doing masking, the Repl32 bit groups might be subsumed into the 64-bit 1432 // masking operation. 1433 if (Repl32 < Other.Repl32) 1434 return true; 1435 else if (Repl32 > Other.Repl32) 1436 return false; 1437 else if (NumGroups > Other.NumGroups) 1438 return true; 1439 else if (NumGroups < Other.NumGroups) 1440 return false; 1441 else if (RLAmt == 0 && Other.RLAmt != 0) 1442 return true; 1443 else if (RLAmt != 0 && Other.RLAmt == 0) 1444 return false; 1445 else if (FirstGroupStartIdx < Other.FirstGroupStartIdx) 1446 return true; 1447 return false; 1448 } 1449 }; 1450 1451 using ValueBitsMemoizedValue = std::pair<bool, SmallVector<ValueBit, 64>>; 1452 using ValueBitsMemoizer = 1453 DenseMap<SDValue, std::unique_ptr<ValueBitsMemoizedValue>>; 1454 ValueBitsMemoizer Memoizer; 1455 1456 // Return a pair of bool and a SmallVector pointer to a memoization entry. 1457 // The bool is true if something interesting was deduced, otherwise if we're 1458 // providing only a generic representation of V (or something else likewise 1459 // uninteresting for instruction selection) through the SmallVector. 1460 std::pair<bool, SmallVector<ValueBit, 64> *> getValueBits(SDValue V, 1461 unsigned NumBits) { 1462 auto &ValueEntry = Memoizer[V]; 1463 if (ValueEntry) 1464 return std::make_pair(ValueEntry->first, &ValueEntry->second); 1465 ValueEntry.reset(new ValueBitsMemoizedValue()); 1466 bool &Interesting = ValueEntry->first; 1467 SmallVector<ValueBit, 64> &Bits = ValueEntry->second; 1468 Bits.resize(NumBits); 1469 1470 switch (V.getOpcode()) { 1471 default: break; 1472 case ISD::ROTL: 1473 if (isa<ConstantSDNode>(V.getOperand(1))) { 1474 unsigned RotAmt = V.getConstantOperandVal(1); 1475 1476 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1477 1478 for (unsigned i = 0; i < NumBits; ++i) 1479 Bits[i] = LHSBits[i < RotAmt ? 
i + (NumBits - RotAmt) : i - RotAmt]; 1480 1481 return std::make_pair(Interesting = true, &Bits); 1482 } 1483 break; 1484 case ISD::SHL: 1485 case PPCISD::SHL: 1486 if (isa<ConstantSDNode>(V.getOperand(1))) { 1487 unsigned ShiftAmt = V.getConstantOperandVal(1); 1488 1489 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1490 1491 for (unsigned i = ShiftAmt; i < NumBits; ++i) 1492 Bits[i] = LHSBits[i - ShiftAmt]; 1493 1494 for (unsigned i = 0; i < ShiftAmt; ++i) 1495 Bits[i] = ValueBit(ValueBit::ConstZero); 1496 1497 return std::make_pair(Interesting = true, &Bits); 1498 } 1499 break; 1500 case ISD::SRL: 1501 case PPCISD::SRL: 1502 if (isa<ConstantSDNode>(V.getOperand(1))) { 1503 unsigned ShiftAmt = V.getConstantOperandVal(1); 1504 1505 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1506 1507 for (unsigned i = 0; i < NumBits - ShiftAmt; ++i) 1508 Bits[i] = LHSBits[i + ShiftAmt]; 1509 1510 for (unsigned i = NumBits - ShiftAmt; i < NumBits; ++i) 1511 Bits[i] = ValueBit(ValueBit::ConstZero); 1512 1513 return std::make_pair(Interesting = true, &Bits); 1514 } 1515 break; 1516 case ISD::AND: 1517 if (isa<ConstantSDNode>(V.getOperand(1))) { 1518 uint64_t Mask = V.getConstantOperandVal(1); 1519 1520 const SmallVector<ValueBit, 64> *LHSBits; 1521 // Mark this as interesting, only if the LHS was also interesting. This 1522 // prevents the overall procedure from matching a single immediate 'and' 1523 // (which is non-optimal because such an and might be folded with other 1524 // things if we don't select it here). 1525 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), NumBits); 1526 1527 for (unsigned i = 0; i < NumBits; ++i) 1528 if (((Mask >> i) & 1) == 1) 1529 Bits[i] = (*LHSBits)[i]; 1530 else { 1531 // AND instruction masks this bit. If the input is already zero, 1532 // we have nothing to do here. Otherwise, make the bit ConstZero. 1533 if ((*LHSBits)[i].isZero()) 1534 Bits[i] = (*LHSBits)[i]; 1535 else 1536 Bits[i] = ValueBit(ValueBit::ConstZero); 1537 } 1538 1539 return std::make_pair(Interesting, &Bits); 1540 } 1541 break; 1542 case ISD::OR: { 1543 const auto &LHSBits = *getValueBits(V.getOperand(0), NumBits).second; 1544 const auto &RHSBits = *getValueBits(V.getOperand(1), NumBits).second; 1545 1546 bool AllDisjoint = true; 1547 SDValue LastVal = SDValue(); 1548 unsigned LastIdx = 0; 1549 for (unsigned i = 0; i < NumBits; ++i) { 1550 if (LHSBits[i].isZero() && RHSBits[i].isZero()) { 1551 // If both inputs are known to be zero and one is ConstZero and 1552 // another is VariableKnownToBeZero, we can select whichever 1553 // we like. To minimize the number of bit groups, we select 1554 // VariableKnownToBeZero if this bit is the next bit of the same 1555 // input variable from the previous bit. Otherwise, we select 1556 // ConstZero. 1557 if (LHSBits[i].hasValue() && LHSBits[i].getValue() == LastVal && 1558 LHSBits[i].getValueBitIndex() == LastIdx + 1) 1559 Bits[i] = LHSBits[i]; 1560 else if (RHSBits[i].hasValue() && RHSBits[i].getValue() == LastVal && 1561 RHSBits[i].getValueBitIndex() == LastIdx + 1) 1562 Bits[i] = RHSBits[i]; 1563 else 1564 Bits[i] = ValueBit(ValueBit::ConstZero); 1565 } 1566 else if (LHSBits[i].isZero()) 1567 Bits[i] = RHSBits[i]; 1568 else if (RHSBits[i].isZero()) 1569 Bits[i] = LHSBits[i]; 1570 else { 1571 AllDisjoint = false; 1572 break; 1573 } 1574 // We remember the value and bit index of this bit. 
1575 if (Bits[i].hasValue()) { 1576 LastVal = Bits[i].getValue(); 1577 LastIdx = Bits[i].getValueBitIndex(); 1578 } 1579 else { 1580 if (LastVal) LastVal = SDValue(); 1581 LastIdx = 0; 1582 } 1583 } 1584 1585 if (!AllDisjoint) 1586 break; 1587 1588 return std::make_pair(Interesting = true, &Bits); 1589 } 1590 case ISD::ZERO_EXTEND: { 1591 // We support only the case with zero extension from i32 to i64 so far. 1592 if (V.getValueType() != MVT::i64 || 1593 V.getOperand(0).getValueType() != MVT::i32) 1594 break; 1595 1596 const SmallVector<ValueBit, 64> *LHSBits; 1597 const unsigned NumOperandBits = 32; 1598 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), 1599 NumOperandBits); 1600 1601 for (unsigned i = 0; i < NumOperandBits; ++i) 1602 Bits[i] = (*LHSBits)[i]; 1603 1604 for (unsigned i = NumOperandBits; i < NumBits; ++i) 1605 Bits[i] = ValueBit(ValueBit::ConstZero); 1606 1607 return std::make_pair(Interesting, &Bits); 1608 } 1609 case ISD::TRUNCATE: { 1610 EVT FromType = V.getOperand(0).getValueType(); 1611 EVT ToType = V.getValueType(); 1612 // We support only the case with truncate from i64 to i32. 1613 if (FromType != MVT::i64 || ToType != MVT::i32) 1614 break; 1615 const unsigned NumAllBits = FromType.getSizeInBits(); 1616 SmallVector<ValueBit, 64> *InBits; 1617 std::tie(Interesting, InBits) = getValueBits(V.getOperand(0), 1618 NumAllBits); 1619 const unsigned NumValidBits = ToType.getSizeInBits(); 1620 1621 // A 32-bit instruction cannot touch upper 32-bit part of 64-bit value. 1622 // So, we cannot include this truncate. 1623 bool UseUpper32bit = false; 1624 for (unsigned i = 0; i < NumValidBits; ++i) 1625 if ((*InBits)[i].hasValue() && (*InBits)[i].getValueBitIndex() >= 32) { 1626 UseUpper32bit = true; 1627 break; 1628 } 1629 if (UseUpper32bit) 1630 break; 1631 1632 for (unsigned i = 0; i < NumValidBits; ++i) 1633 Bits[i] = (*InBits)[i]; 1634 1635 return std::make_pair(Interesting, &Bits); 1636 } 1637 case ISD::AssertZext: { 1638 // For AssertZext, we look through the operand and 1639 // mark the bits known to be zero. 1640 const SmallVector<ValueBit, 64> *LHSBits; 1641 std::tie(Interesting, LHSBits) = getValueBits(V.getOperand(0), 1642 NumBits); 1643 1644 EVT FromType = cast<VTSDNode>(V.getOperand(1))->getVT(); 1645 const unsigned NumValidBits = FromType.getSizeInBits(); 1646 for (unsigned i = 0; i < NumValidBits; ++i) 1647 Bits[i] = (*LHSBits)[i]; 1648 1649 // These bits are known to be zero but the AssertZext may be from a value 1650 // that already has some constant zero bits (i.e. from a masking and). 1651 for (unsigned i = NumValidBits; i < NumBits; ++i) 1652 Bits[i] = (*LHSBits)[i].hasValue() 1653 ? ValueBit((*LHSBits)[i].getValue(), 1654 (*LHSBits)[i].getValueBitIndex(), 1655 ValueBit::VariableKnownToBeZero) 1656 : ValueBit(ValueBit::ConstZero); 1657 1658 return std::make_pair(Interesting, &Bits); 1659 } 1660 case ISD::LOAD: 1661 LoadSDNode *LD = cast<LoadSDNode>(V); 1662 if (ISD::isZEXTLoad(V.getNode()) && V.getResNo() == 0) { 1663 EVT VT = LD->getMemoryVT(); 1664 const unsigned NumValidBits = VT.getSizeInBits(); 1665 1666 for (unsigned i = 0; i < NumValidBits; ++i) 1667 Bits[i] = ValueBit(V, i); 1668 1669 // These bits are known to be zero. 1670 for (unsigned i = NumValidBits; i < NumBits; ++i) 1671 Bits[i] = ValueBit(V, i, ValueBit::VariableKnownToBeZero); 1672 1673 // Zero-extending load itself cannot be optimized. So, it is not 1674 // interesting by itself though it gives useful information. 
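        // Illustrative example: for a zero-extending i16 load used as an i64
        // value, Bits[0..15] become Variable bits of the load and
        // Bits[16..63] become VariableKnownToBeZero, so a later mask that
        // only clears bits 16 and above can be recognized as redundant even
        // though the load itself is reported as not interesting.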
        return std::make_pair(Interesting = false, &Bits);
      }
      break;
    }

    for (unsigned i = 0; i < NumBits; ++i)
      Bits[i] = ValueBit(V, i);

    return std::make_pair(Interesting = false, &Bits);
  }

  // For each value (except the constant ones), compute the left-rotate amount
  // to get it from its original to final position.
  void computeRotationAmounts() {
    NeedMask = false;
    RLAmt.resize(Bits.size());
    for (unsigned i = 0; i < Bits.size(); ++i)
      if (Bits[i].hasValue()) {
        unsigned VBI = Bits[i].getValueBitIndex();
        if (i >= VBI)
          RLAmt[i] = i - VBI;
        else
          RLAmt[i] = Bits.size() - (VBI - i);
      } else if (Bits[i].isZero()) {
        NeedMask = true;
        RLAmt[i] = UINT32_MAX;
      } else {
        llvm_unreachable("Unknown value bit type");
      }
  }

  // Collect groups of consecutive bits with the same underlying value and
  // rotation factor. If we're doing late masking, we ignore zeros, otherwise
  // they break up groups.
  void collectBitGroups(bool LateMask) {
    BitGroups.clear();

    unsigned LastRLAmt = RLAmt[0];
    SDValue LastValue = Bits[0].hasValue() ? Bits[0].getValue() : SDValue();
    unsigned LastGroupStartIdx = 0;
    bool IsGroupOfZeros = !Bits[LastGroupStartIdx].hasValue();
    for (unsigned i = 1; i < Bits.size(); ++i) {
      unsigned ThisRLAmt = RLAmt[i];
      SDValue ThisValue = Bits[i].hasValue() ? Bits[i].getValue() : SDValue();
      if (LateMask && !ThisValue) {
        ThisValue = LastValue;
        ThisRLAmt = LastRLAmt;
        // If we're doing late masking, then the first bit group always starts
        // at zero (even if the first bits were zero).
        if (BitGroups.empty())
          LastGroupStartIdx = 0;
      }

      // If this bit is known to be zero and the current group is a bit group
      // of zeros, we do not need to terminate the current bit group even if
      // the Value or RLAmt does not match here. Instead, we terminate this
      // group when the first non-zero bit appears later.
      if (IsGroupOfZeros && Bits[i].isZero())
        continue;

      // If this bit has the same underlying value and the same rotate factor
      // as the last one, then they're part of the same group.
      if (ThisRLAmt == LastRLAmt && ThisValue == LastValue)
        // We cannot continue the current group if this bit is not known to
        // be zero in a bit group of zeros.
        if (!(IsGroupOfZeros && ThisValue && !Bits[i].isZero()))
          continue;

      if (LastValue.getNode())
        BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
                                     i-1));
      LastRLAmt = ThisRLAmt;
      LastValue = ThisValue;
      LastGroupStartIdx = i;
      IsGroupOfZeros = !Bits[LastGroupStartIdx].hasValue();
    }
    if (LastValue.getNode())
      BitGroups.push_back(BitGroup(LastValue, LastRLAmt, LastGroupStartIdx,
                                   Bits.size()-1));

    if (BitGroups.empty())
      return;

    // We might be able to combine the first and last groups.
    if (BitGroups.size() > 1) {
      // If the first and last groups are the same, then remove the first group
      // in favor of the last group, making the ending index of the last group
      // equal to the ending index of the to-be-removed first group.
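      // For example (illustrative): with Bits.size() == 64, the groups
      // {V, RLAmt = 8, [0, 7]} and {V, RLAmt = 8, [56, 63]} are folded into a
      // single wrap-around group {V, RLAmt = 8, [56, 7]}.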
      if (BitGroups[0].StartIdx == 0 &&
          BitGroups[BitGroups.size()-1].EndIdx == Bits.size()-1 &&
          BitGroups[0].V == BitGroups[BitGroups.size()-1].V &&
          BitGroups[0].RLAmt == BitGroups[BitGroups.size()-1].RLAmt) {
        LLVM_DEBUG(dbgs() << "\tcombining final bit group with initial one\n");
        BitGroups[BitGroups.size()-1].EndIdx = BitGroups[0].EndIdx;
        BitGroups.erase(BitGroups.begin());
      }
    }
  }

  // Take all (SDValue, RLAmt) pairs and sort them by the number of groups
  // associated with each. If the number of groups is the same, we prefer a
  // group which does not require a rotate (i.e. RLAmt is 0) to avoid the
  // first rotate instruction. If there is a degeneracy, pick the one that
  // occurs first (in the final value).
  void collectValueRotInfo() {
    ValueRots.clear();

    for (auto &BG : BitGroups) {
      unsigned RLAmtKey = BG.RLAmt + (BG.Repl32 ? 64 : 0);
      ValueRotInfo &VRI = ValueRots[std::make_pair(BG.V, RLAmtKey)];
      VRI.V = BG.V;
      VRI.RLAmt = BG.RLAmt;
      VRI.Repl32 = BG.Repl32;
      VRI.NumGroups += 1;
      VRI.FirstGroupStartIdx = std::min(VRI.FirstGroupStartIdx, BG.StartIdx);
    }

    // Now that we've collected the various ValueRotInfo instances, we need to
    // sort them.
    ValueRotsVec.clear();
    for (auto &I : ValueRots) {
      ValueRotsVec.push_back(I.second);
    }
    llvm::sort(ValueRotsVec);
  }

  // In 64-bit mode, rlwinm and friends have a rotation operator that
  // replicates the low-order 32 bits into the high-order 32 bits. The mask
  // indices of these instructions can only be in the lower 32 bits, so they
  // can only represent some 64-bit bit groups. However, when they can be used,
  // the 32-bit replication can be used to represent, as a single bit group,
  // otherwise separate bit groups. We'll convert to replicated-32-bit bit
  // groups when possible.
  void assignRepl32BitGroups() {
    // If we have bits like this:
    //
    // Indices:     15 14 13 12 11 10  9  8  7  6  5  4  3  2  1  0
    // V bits:  ...  7  6  5  4  3  2  1  0 31 30 29 28 27 26 25 24
    // Groups:     |      RLAmt = 8       |      RLAmt = 40       |
    //
    // But, making use of a 32-bit operation that replicates the low-order 32
    // bits into the high-order 32 bits, this can be one bit group with a RLAmt
    // of 8.

    auto IsAllLow32 = [this](BitGroup & BG) {
      if (BG.StartIdx <= BG.EndIdx) {
        for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i) {
          if (!Bits[i].hasValue())
            continue;
          if (Bits[i].getValueBitIndex() >= 32)
            return false;
        }
      } else {
        for (unsigned i = BG.StartIdx; i < Bits.size(); ++i) {
          if (!Bits[i].hasValue())
            continue;
          if (Bits[i].getValueBitIndex() >= 32)
            return false;
        }
        for (unsigned i = 0; i <= BG.EndIdx; ++i) {
          if (!Bits[i].hasValue())
            continue;
          if (Bits[i].getValueBitIndex() >= 32)
            return false;
        }
      }

      return true;
    };

    for (auto &BG : BitGroups) {
      // If this bit group has RLAmt of 0 and will not be merged with
      // another bit group, we don't benefit from Repl32. We don't mark
      // such a group, to give more freedom for later instruction selection.
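      // Sketch of the intended effect (hypothetical groups, mirroring the
      // diagram above): a group for V with RLAmt = 40 at indices [0, 7],
      // whose source bits 24..31 all lie below bit 32, becomes a Repl32 group
      // with RLAmt = 8 (Repl32CR = true) and can then be coalesced with a
      // neighbouring {V, RLAmt = 8} group in the low word. An RLAmt = 0 group
      // is only marked if such a merge with a sibling group is possible.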
1850 if (BG.RLAmt == 0) { 1851 auto PotentiallyMerged = [this](BitGroup & BG) { 1852 for (auto &BG2 : BitGroups) 1853 if (&BG != &BG2 && BG.V == BG2.V && 1854 (BG2.RLAmt == 0 || BG2.RLAmt == 32)) 1855 return true; 1856 return false; 1857 }; 1858 if (!PotentiallyMerged(BG)) 1859 continue; 1860 } 1861 if (BG.StartIdx < 32 && BG.EndIdx < 32) { 1862 if (IsAllLow32(BG)) { 1863 if (BG.RLAmt >= 32) { 1864 BG.RLAmt -= 32; 1865 BG.Repl32CR = true; 1866 } 1867 1868 BG.Repl32 = true; 1869 1870 LLVM_DEBUG(dbgs() << "\t32-bit replicated bit group for " 1871 << BG.V.getNode() << " RLAmt = " << BG.RLAmt << " [" 1872 << BG.StartIdx << ", " << BG.EndIdx << "]\n"); 1873 } 1874 } 1875 } 1876 1877 // Now walk through the bit groups, consolidating where possible. 1878 for (auto I = BitGroups.begin(); I != BitGroups.end();) { 1879 // We might want to remove this bit group by merging it with the previous 1880 // group (which might be the ending group). 1881 auto IP = (I == BitGroups.begin()) ? 1882 std::prev(BitGroups.end()) : std::prev(I); 1883 if (I->Repl32 && IP->Repl32 && I->V == IP->V && I->RLAmt == IP->RLAmt && 1884 I->StartIdx == (IP->EndIdx + 1) % 64 && I != IP) { 1885 1886 LLVM_DEBUG(dbgs() << "\tcombining 32-bit replicated bit group for " 1887 << I->V.getNode() << " RLAmt = " << I->RLAmt << " [" 1888 << I->StartIdx << ", " << I->EndIdx 1889 << "] with group with range [" << IP->StartIdx << ", " 1890 << IP->EndIdx << "]\n"); 1891 1892 IP->EndIdx = I->EndIdx; 1893 IP->Repl32CR = IP->Repl32CR || I->Repl32CR; 1894 IP->Repl32Coalesced = true; 1895 I = BitGroups.erase(I); 1896 continue; 1897 } else { 1898 // There is a special case worth handling: If there is a single group 1899 // covering the entire upper 32 bits, and it can be merged with both 1900 // the next and previous groups (which might be the same group), then 1901 // do so. If it is the same group (so there will be only one group in 1902 // total), then we need to reverse the order of the range so that it 1903 // covers the entire 64 bits. 1904 if (I->StartIdx == 32 && I->EndIdx == 63) { 1905 assert(std::next(I) == BitGroups.end() && 1906 "bit group ends at index 63 but there is another?"); 1907 auto IN = BitGroups.begin(); 1908 1909 if (IP->Repl32 && IN->Repl32 && I->V == IP->V && I->V == IN->V && 1910 (I->RLAmt % 32) == IP->RLAmt && (I->RLAmt % 32) == IN->RLAmt && 1911 IP->EndIdx == 31 && IN->StartIdx == 0 && I != IP && 1912 IsAllLow32(*I)) { 1913 1914 LLVM_DEBUG(dbgs() << "\tcombining bit group for " << I->V.getNode() 1915 << " RLAmt = " << I->RLAmt << " [" << I->StartIdx 1916 << ", " << I->EndIdx 1917 << "] with 32-bit replicated groups with ranges [" 1918 << IP->StartIdx << ", " << IP->EndIdx << "] and [" 1919 << IN->StartIdx << ", " << IN->EndIdx << "]\n"); 1920 1921 if (IP == IN) { 1922 // There is only one other group; change it to cover the whole 1923 // range (backward, so that it can still be Repl32 but cover the 1924 // whole 64-bit range). 1925 IP->StartIdx = 31; 1926 IP->EndIdx = 30; 1927 IP->Repl32CR = IP->Repl32CR || I->RLAmt >= 32; 1928 IP->Repl32Coalesced = true; 1929 I = BitGroups.erase(I); 1930 } else { 1931 // There are two separate groups, one before this group and one 1932 // after us (at the beginning). We're going to remove this group, 1933 // but also the group at the very beginning. 
1934 IP->EndIdx = IN->EndIdx; 1935 IP->Repl32CR = IP->Repl32CR || IN->Repl32CR || I->RLAmt >= 32; 1936 IP->Repl32Coalesced = true; 1937 I = BitGroups.erase(I); 1938 BitGroups.erase(BitGroups.begin()); 1939 } 1940 1941 // This must be the last group in the vector (and we might have 1942 // just invalidated the iterator above), so break here. 1943 break; 1944 } 1945 } 1946 } 1947 1948 ++I; 1949 } 1950 } 1951 1952 SDValue getI32Imm(unsigned Imm, const SDLoc &dl) { 1953 return CurDAG->getTargetConstant(Imm, dl, MVT::i32); 1954 } 1955 1956 uint64_t getZerosMask() { 1957 uint64_t Mask = 0; 1958 for (unsigned i = 0; i < Bits.size(); ++i) { 1959 if (Bits[i].hasValue()) 1960 continue; 1961 Mask |= (UINT64_C(1) << i); 1962 } 1963 1964 return ~Mask; 1965 } 1966 1967 // This method extends an input value to 64 bit if input is 32-bit integer. 1968 // While selecting instructions in BitPermutationSelector in 64-bit mode, 1969 // an input value can be a 32-bit integer if a ZERO_EXTEND node is included. 1970 // In such case, we extend it to 64 bit to be consistent with other values. 1971 SDValue ExtendToInt64(SDValue V, const SDLoc &dl) { 1972 if (V.getValueSizeInBits() == 64) 1973 return V; 1974 1975 assert(V.getValueSizeInBits() == 32); 1976 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 1977 SDValue ImDef = SDValue(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, 1978 MVT::i64), 0); 1979 SDValue ExtVal = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, 1980 MVT::i64, ImDef, V, 1981 SubRegIdx), 0); 1982 return ExtVal; 1983 } 1984 1985 SDValue TruncateToInt32(SDValue V, const SDLoc &dl) { 1986 if (V.getValueSizeInBits() == 32) 1987 return V; 1988 1989 assert(V.getValueSizeInBits() == 64); 1990 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 1991 SDValue SubVal = SDValue(CurDAG->getMachineNode(PPC::EXTRACT_SUBREG, dl, 1992 MVT::i32, V, SubRegIdx), 0); 1993 return SubVal; 1994 } 1995 1996 // Depending on the number of groups for a particular value, it might be 1997 // better to rotate, mask explicitly (using andi/andis), and then or the 1998 // result. Select this part of the result first. 1999 void SelectAndParts32(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) { 2000 if (BPermRewriterNoMasking) 2001 return; 2002 2003 for (ValueRotInfo &VRI : ValueRotsVec) { 2004 unsigned Mask = 0; 2005 for (unsigned i = 0; i < Bits.size(); ++i) { 2006 if (!Bits[i].hasValue() || Bits[i].getValue() != VRI.V) 2007 continue; 2008 if (RLAmt[i] != VRI.RLAmt) 2009 continue; 2010 Mask |= (1u << i); 2011 } 2012 2013 // Compute the masks for andi/andis that would be necessary. 2014 unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16; 2015 assert((ANDIMask != 0 || ANDISMask != 0) && 2016 "No set bits in mask for value bit groups"); 2017 bool NeedsRotate = VRI.RLAmt != 0; 2018 2019 // We're trying to minimize the number of instructions. If we have one 2020 // group, using one of andi/andis can break even. If we have three 2021 // groups, we can use both andi and andis and break even (to use both 2022 // andi and andis we also need to or the results together). We need four 2023 // groups if we also need to rotate. To use andi/andis we need to do more 2024 // than break even because rotate-and-mask instructions tend to be easier 2025 // to schedule. 2026 2027 // FIXME: We've biased here against using andi/andis, which is right for 2028 // POWER cores, but not optimal everywhere. 
For example, on the A2, 2029 // andi/andis have single-cycle latency whereas the rotate-and-mask 2030 // instructions take two cycles, and it would be better to bias toward 2031 // andi/andis in break-even cases. 2032 2033 unsigned NumAndInsts = (unsigned) NeedsRotate + 2034 (unsigned) (ANDIMask != 0) + 2035 (unsigned) (ANDISMask != 0) + 2036 (unsigned) (ANDIMask != 0 && ANDISMask != 0) + 2037 (unsigned) (bool) Res; 2038 2039 LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode() 2040 << " RL: " << VRI.RLAmt << ":" 2041 << "\n\t\t\tisel using masking: " << NumAndInsts 2042 << " using rotates: " << VRI.NumGroups << "\n"); 2043 2044 if (NumAndInsts >= VRI.NumGroups) 2045 continue; 2046 2047 LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n"); 2048 2049 if (InstCnt) *InstCnt += NumAndInsts; 2050 2051 SDValue VRot; 2052 if (VRI.RLAmt) { 2053 SDValue Ops[] = 2054 { TruncateToInt32(VRI.V, dl), getI32Imm(VRI.RLAmt, dl), 2055 getI32Imm(0, dl), getI32Imm(31, dl) }; 2056 VRot = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 2057 Ops), 0); 2058 } else { 2059 VRot = TruncateToInt32(VRI.V, dl); 2060 } 2061 2062 SDValue ANDIVal, ANDISVal; 2063 if (ANDIMask != 0) 2064 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI_rec, dl, MVT::i32, 2065 VRot, getI32Imm(ANDIMask, dl)), 2066 0); 2067 if (ANDISMask != 0) 2068 ANDISVal = 2069 SDValue(CurDAG->getMachineNode(PPC::ANDIS_rec, dl, MVT::i32, VRot, 2070 getI32Imm(ANDISMask, dl)), 2071 0); 2072 2073 SDValue TotalVal; 2074 if (!ANDIVal) 2075 TotalVal = ANDISVal; 2076 else if (!ANDISVal) 2077 TotalVal = ANDIVal; 2078 else 2079 TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, 2080 ANDIVal, ANDISVal), 0); 2081 2082 if (!Res) 2083 Res = TotalVal; 2084 else 2085 Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, 2086 Res, TotalVal), 0); 2087 2088 // Now, remove all groups with this underlying value and rotation 2089 // factor. 2090 eraseMatchingBitGroups([VRI](const BitGroup &BG) { 2091 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt; 2092 }); 2093 } 2094 } 2095 2096 // Instruction selection for the 32-bit case. 2097 SDNode *Select32(SDNode *N, bool LateMask, unsigned *InstCnt) { 2098 SDLoc dl(N); 2099 SDValue Res; 2100 2101 if (InstCnt) *InstCnt = 0; 2102 2103 // Take care of cases that should use andi/andis first. 2104 SelectAndParts32(dl, Res, InstCnt); 2105 2106 // If we've not yet selected a 'starting' instruction, and we have no zeros 2107 // to fill in, select the (Value, RLAmt) with the highest priority (largest 2108 // number of groups), and start with this rotated value. 2109 if ((!NeedMask || LateMask) && !Res) { 2110 ValueRotInfo &VRI = ValueRotsVec[0]; 2111 if (VRI.RLAmt) { 2112 if (InstCnt) *InstCnt += 1; 2113 SDValue Ops[] = 2114 { TruncateToInt32(VRI.V, dl), getI32Imm(VRI.RLAmt, dl), 2115 getI32Imm(0, dl), getI32Imm(31, dl) }; 2116 Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 2117 0); 2118 } else { 2119 Res = TruncateToInt32(VRI.V, dl); 2120 } 2121 2122 // Now, remove all groups with this underlying value and rotation factor. 2123 eraseMatchingBitGroups([VRI](const BitGroup &BG) { 2124 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt; 2125 }); 2126 } 2127 2128 if (InstCnt) *InstCnt += BitGroups.size(); 2129 2130 // Insert the other groups (one at a time). 
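    // Worked example (illustrative): for a 32-bit group with StartIdx = 8,
    // EndIdx = 15 and RLAmt = 24, the operands computed below are
    // MB = 32 - 15 - 1 = 16 and ME = 32 - 8 - 1 = 23, i.e. roughly
    //   rlwimi rRes, rV, 24, 16, 23
    // (or rlwinm for the first group, when there is no existing Res).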
2131 for (auto &BG : BitGroups) { 2132 if (!Res) { 2133 SDValue Ops[] = 2134 { TruncateToInt32(BG.V, dl), getI32Imm(BG.RLAmt, dl), 2135 getI32Imm(Bits.size() - BG.EndIdx - 1, dl), 2136 getI32Imm(Bits.size() - BG.StartIdx - 1, dl) }; 2137 Res = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); 2138 } else { 2139 SDValue Ops[] = 2140 { Res, TruncateToInt32(BG.V, dl), getI32Imm(BG.RLAmt, dl), 2141 getI32Imm(Bits.size() - BG.EndIdx - 1, dl), 2142 getI32Imm(Bits.size() - BG.StartIdx - 1, dl) }; 2143 Res = SDValue(CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops), 0); 2144 } 2145 } 2146 2147 if (LateMask) { 2148 unsigned Mask = (unsigned) getZerosMask(); 2149 2150 unsigned ANDIMask = (Mask & UINT16_MAX), ANDISMask = Mask >> 16; 2151 assert((ANDIMask != 0 || ANDISMask != 0) && 2152 "No set bits in zeros mask?"); 2153 2154 if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) + 2155 (unsigned) (ANDISMask != 0) + 2156 (unsigned) (ANDIMask != 0 && ANDISMask != 0); 2157 2158 SDValue ANDIVal, ANDISVal; 2159 if (ANDIMask != 0) 2160 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI_rec, dl, MVT::i32, 2161 Res, getI32Imm(ANDIMask, dl)), 2162 0); 2163 if (ANDISMask != 0) 2164 ANDISVal = 2165 SDValue(CurDAG->getMachineNode(PPC::ANDIS_rec, dl, MVT::i32, Res, 2166 getI32Imm(ANDISMask, dl)), 2167 0); 2168 2169 if (!ANDIVal) 2170 Res = ANDISVal; 2171 else if (!ANDISVal) 2172 Res = ANDIVal; 2173 else 2174 Res = SDValue(CurDAG->getMachineNode(PPC::OR, dl, MVT::i32, 2175 ANDIVal, ANDISVal), 0); 2176 } 2177 2178 return Res.getNode(); 2179 } 2180 2181 unsigned SelectRotMask64Count(unsigned RLAmt, bool Repl32, 2182 unsigned MaskStart, unsigned MaskEnd, 2183 bool IsIns) { 2184 // In the notation used by the instructions, 'start' and 'end' are reversed 2185 // because bits are counted from high to low order. 2186 unsigned InstMaskStart = 64 - MaskEnd - 1, 2187 InstMaskEnd = 64 - MaskStart - 1; 2188 2189 if (Repl32) 2190 return 1; 2191 2192 if ((!IsIns && (InstMaskEnd == 63 || InstMaskStart == 0)) || 2193 InstMaskEnd == 63 - RLAmt) 2194 return 1; 2195 2196 return 2; 2197 } 2198 2199 // For 64-bit values, not all combinations of rotates and masks are 2200 // available. Produce one if it is available. 2201 SDValue SelectRotMask64(SDValue V, const SDLoc &dl, unsigned RLAmt, 2202 bool Repl32, unsigned MaskStart, unsigned MaskEnd, 2203 unsigned *InstCnt = nullptr) { 2204 // In the notation used by the instructions, 'start' and 'end' are reversed 2205 // because bits are counted from high to low order. 2206 unsigned InstMaskStart = 64 - MaskEnd - 1, 2207 InstMaskEnd = 64 - MaskStart - 1; 2208 2209 if (InstCnt) *InstCnt += 1; 2210 2211 if (Repl32) { 2212 // This rotation amount assumes that the lower 32 bits of the quantity 2213 // are replicated in the high 32 bits by the rotation operator (which is 2214 // done by rlwinm and friends). 
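      // Worked example (illustrative): a Repl32 group with MaskStart = 8 and
      // MaskEnd = 15 gives InstMaskStart = 64 - 15 - 1 = 48 and
      // InstMaskEnd = 64 - 8 - 1 = 55, so after the -32 adjustment below the
      // emitted instruction is roughly
      //   rlwinm8 rD, rS, RLAmt, 16, 23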
2215 assert(InstMaskStart >= 32 && "Mask cannot start out of range"); 2216 assert(InstMaskEnd >= 32 && "Mask cannot end out of range"); 2217 SDValue Ops[] = 2218 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2219 getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) }; 2220 return SDValue(CurDAG->getMachineNode(PPC::RLWINM8, dl, MVT::i64, 2221 Ops), 0); 2222 } 2223 2224 if (InstMaskEnd == 63) { 2225 SDValue Ops[] = 2226 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2227 getI32Imm(InstMaskStart, dl) }; 2228 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Ops), 0); 2229 } 2230 2231 if (InstMaskStart == 0) { 2232 SDValue Ops[] = 2233 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2234 getI32Imm(InstMaskEnd, dl) }; 2235 return SDValue(CurDAG->getMachineNode(PPC::RLDICR, dl, MVT::i64, Ops), 0); 2236 } 2237 2238 if (InstMaskEnd == 63 - RLAmt) { 2239 SDValue Ops[] = 2240 { ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2241 getI32Imm(InstMaskStart, dl) }; 2242 return SDValue(CurDAG->getMachineNode(PPC::RLDIC, dl, MVT::i64, Ops), 0); 2243 } 2244 2245 // We cannot do this with a single instruction, so we'll use two. The 2246 // problem is that we're not free to choose both a rotation amount and mask 2247 // start and end independently. We can choose an arbitrary mask start and 2248 // end, but then the rotation amount is fixed. Rotation, however, can be 2249 // inverted, and so by applying an "inverse" rotation first, we can get the 2250 // desired result. 2251 if (InstCnt) *InstCnt += 1; 2252 2253 // The rotation mask for the second instruction must be MaskStart. 2254 unsigned RLAmt2 = MaskStart; 2255 // The first instruction must rotate V so that the overall rotation amount 2256 // is RLAmt. 2257 unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64; 2258 if (RLAmt1) 2259 V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63); 2260 return SelectRotMask64(V, dl, RLAmt2, false, MaskStart, MaskEnd); 2261 } 2262 2263 // For 64-bit values, not all combinations of rotates and masks are 2264 // available. Produce a rotate-mask-and-insert if one is available. 2265 SDValue SelectRotMaskIns64(SDValue Base, SDValue V, const SDLoc &dl, 2266 unsigned RLAmt, bool Repl32, unsigned MaskStart, 2267 unsigned MaskEnd, unsigned *InstCnt = nullptr) { 2268 // In the notation used by the instructions, 'start' and 'end' are reversed 2269 // because bits are counted from high to low order. 2270 unsigned InstMaskStart = 64 - MaskEnd - 1, 2271 InstMaskEnd = 64 - MaskStart - 1; 2272 2273 if (InstCnt) *InstCnt += 1; 2274 2275 if (Repl32) { 2276 // This rotation amount assumes that the lower 32 bits of the quantity 2277 // are replicated in the high 32 bits by the rotation operator (which is 2278 // done by rlwinm and friends). 2279 assert(InstMaskStart >= 32 && "Mask cannot start out of range"); 2280 assert(InstMaskEnd >= 32 && "Mask cannot end out of range"); 2281 SDValue Ops[] = 2282 { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2283 getI32Imm(InstMaskStart - 32, dl), getI32Imm(InstMaskEnd - 32, dl) }; 2284 return SDValue(CurDAG->getMachineNode(PPC::RLWIMI8, dl, MVT::i64, 2285 Ops), 0); 2286 } 2287 2288 if (InstMaskEnd == 63 - RLAmt) { 2289 SDValue Ops[] = 2290 { ExtendToInt64(Base, dl), ExtendToInt64(V, dl), getI32Imm(RLAmt, dl), 2291 getI32Imm(InstMaskStart, dl) }; 2292 return SDValue(CurDAG->getMachineNode(PPC::RLDIMI, dl, MVT::i64, Ops), 0); 2293 } 2294 2295 // We cannot do this with a single instruction, so we'll use two. 
    // The problem is that we're not free to choose both a rotation amount and
    // mask start and end independently. We can choose an arbitrary mask start
    // and end, but then the rotation amount is fixed. Rotation, however, can
    // be inverted, and so by applying an "inverse" rotation first, we can get
    // the desired result.
    if (InstCnt) *InstCnt += 1;

    // The rotation mask for the second instruction must be MaskStart.
    unsigned RLAmt2 = MaskStart;
    // The first instruction must rotate V so that the overall rotation amount
    // is RLAmt.
    unsigned RLAmt1 = (64 + RLAmt - RLAmt2) % 64;
    if (RLAmt1)
      V = SelectRotMask64(V, dl, RLAmt1, false, 0, 63);
    return SelectRotMaskIns64(Base, V, dl, RLAmt2, false, MaskStart, MaskEnd);
  }

  void SelectAndParts64(const SDLoc &dl, SDValue &Res, unsigned *InstCnt) {
    if (BPermRewriterNoMasking)
      return;

    // The idea here is the same as in the 32-bit version, but with additional
    // complications from the fact that Repl32 might be true. Because we
    // aggressively convert bit groups to Repl32 form (which, for small
    // rotation factors, involves no other change), and then coalesce, it might
    // be the case that a single 64-bit masking operation could handle both
    // some Repl32 groups and some non-Repl32 groups. If converting to Repl32
    // form allowed coalescing, then we must use a 32-bit rotation in order to
    // completely capture the new combined bit group.

    for (ValueRotInfo &VRI : ValueRotsVec) {
      uint64_t Mask = 0;

      // We need to add to the mask all bits from the associated bit groups.
      // If Repl32 is false, we need to add bits from bit groups that have
      // Repl32 true, but are trivially convertible to Repl32 false. Such a
      // group is trivially convertible if it overlaps only with the lower 32
      // bits, and the group has not been coalesced.
      auto MatchingBG = [VRI](const BitGroup &BG) {
        if (VRI.V != BG.V)
          return false;

        unsigned EffRLAmt = BG.RLAmt;
        if (!VRI.Repl32 && BG.Repl32) {
          if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx <= BG.EndIdx &&
              !BG.Repl32Coalesced) {
            if (BG.Repl32CR)
              EffRLAmt += 32;
          } else {
            return false;
          }
        } else if (VRI.Repl32 != BG.Repl32) {
          return false;
        }

        return VRI.RLAmt == EffRLAmt;
      };

      for (auto &BG : BitGroups) {
        if (!MatchingBG(BG))
          continue;

        if (BG.StartIdx <= BG.EndIdx) {
          for (unsigned i = BG.StartIdx; i <= BG.EndIdx; ++i)
            Mask |= (UINT64_C(1) << i);
        } else {
          for (unsigned i = BG.StartIdx; i < Bits.size(); ++i)
            Mask |= (UINT64_C(1) << i);
          for (unsigned i = 0; i <= BG.EndIdx; ++i)
            Mask |= (UINT64_C(1) << i);
        }
      }

      // We can use the 32-bit andi/andis technique if the mask does not
      // require any higher-order bits. This can save an instruction compared
      // to always using the general 64-bit technique.
      bool Use32BitInsts = isUInt<32>(Mask);
      // Compute the masks for andi/andis that would be necessary.
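      // For instance (illustrative): Mask = 0x00FF0F00 splits into
      // ANDIMask = 0x0F00 and ANDISMask = 0x00FF; both halves are non-zero,
      // so the 32-bit technique would cost an andi., an andis. and an or to
      // combine them.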
      unsigned ANDIMask = (Mask & UINT16_MAX),
               ANDISMask = (Mask >> 16) & UINT16_MAX;

      bool NeedsRotate = VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask));

      unsigned NumAndInsts = (unsigned) NeedsRotate +
                             (unsigned) (bool) Res;
      unsigned NumOfSelectInsts = 0;
      selectI64Imm(CurDAG, dl, Mask, &NumOfSelectInsts);
      assert(NumOfSelectInsts > 0 && "Failed to select an i64 constant.");
      if (Use32BitInsts)
        NumAndInsts += (unsigned) (ANDIMask != 0) + (unsigned) (ANDISMask != 0) +
                       (unsigned) (ANDIMask != 0 && ANDISMask != 0);
      else
        NumAndInsts += NumOfSelectInsts + /* and */ 1;

      unsigned NumRLInsts = 0;
      bool FirstBG = true;
      bool MoreBG = false;
      for (auto &BG : BitGroups) {
        if (!MatchingBG(BG)) {
          MoreBG = true;
          continue;
        }
        NumRLInsts +=
          SelectRotMask64Count(BG.RLAmt, BG.Repl32, BG.StartIdx, BG.EndIdx,
                               !FirstBG);
        FirstBG = false;
      }

      LLVM_DEBUG(dbgs() << "\t\trotation groups for " << VRI.V.getNode()
                        << " RL: " << VRI.RLAmt << (VRI.Repl32 ? " (32):" : ":")
                        << "\n\t\t\tisel using masking: " << NumAndInsts
                        << " using rotates: " << NumRLInsts << "\n");

      // When we'd use andi/andis, we bias toward using the rotates (andi only
      // has a record form, and is cracked on POWER cores). However, when using
      // general 64-bit constant formation, bias toward the constant form,
      // because that exposes more opportunities for CSE.
      if (NumAndInsts > NumRLInsts)
        continue;
      // When merging multiple bit groups, an 'or' instruction is needed.
      // But when a rotate is used, rldimi can insert the rotated value into
      // any register, so the 'or' can be avoided.
      if ((Use32BitInsts || MoreBG) && NumAndInsts == NumRLInsts)
        continue;

      LLVM_DEBUG(dbgs() << "\t\t\t\tusing masking\n");

      if (InstCnt) *InstCnt += NumAndInsts;

      SDValue VRot;
      // We actually need to generate a rotation if we have a non-zero rotation
      // factor or, in the Repl32 case, if we care about any of the
      // higher-order replicated bits. In the latter case, we generate a mask
      // backward so that it actually includes the entire 64 bits.
      if (VRI.RLAmt || (VRI.Repl32 && !isUInt<32>(Mask)))
        VRot = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32,
                               VRI.Repl32 ? 31 : 0, VRI.Repl32 ?
30 : 63); 2433 else 2434 VRot = VRI.V; 2435 2436 SDValue TotalVal; 2437 if (Use32BitInsts) { 2438 assert((ANDIMask != 0 || ANDISMask != 0) && 2439 "No set bits in mask when using 32-bit ands for 64-bit value"); 2440 2441 SDValue ANDIVal, ANDISVal; 2442 if (ANDIMask != 0) 2443 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8_rec, dl, MVT::i64, 2444 ExtendToInt64(VRot, dl), 2445 getI32Imm(ANDIMask, dl)), 2446 0); 2447 if (ANDISMask != 0) 2448 ANDISVal = 2449 SDValue(CurDAG->getMachineNode(PPC::ANDIS8_rec, dl, MVT::i64, 2450 ExtendToInt64(VRot, dl), 2451 getI32Imm(ANDISMask, dl)), 2452 0); 2453 2454 if (!ANDIVal) 2455 TotalVal = ANDISVal; 2456 else if (!ANDISVal) 2457 TotalVal = ANDIVal; 2458 else 2459 TotalVal = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 2460 ExtendToInt64(ANDIVal, dl), ANDISVal), 0); 2461 } else { 2462 TotalVal = SDValue(selectI64Imm(CurDAG, dl, Mask), 0); 2463 TotalVal = 2464 SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64, 2465 ExtendToInt64(VRot, dl), TotalVal), 2466 0); 2467 } 2468 2469 if (!Res) 2470 Res = TotalVal; 2471 else 2472 Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 2473 ExtendToInt64(Res, dl), TotalVal), 2474 0); 2475 2476 // Now, remove all groups with this underlying value and rotation 2477 // factor. 2478 eraseMatchingBitGroups(MatchingBG); 2479 } 2480 } 2481 2482 // Instruction selection for the 64-bit case. 2483 SDNode *Select64(SDNode *N, bool LateMask, unsigned *InstCnt) { 2484 SDLoc dl(N); 2485 SDValue Res; 2486 2487 if (InstCnt) *InstCnt = 0; 2488 2489 // Take care of cases that should use andi/andis first. 2490 SelectAndParts64(dl, Res, InstCnt); 2491 2492 // If we've not yet selected a 'starting' instruction, and we have no zeros 2493 // to fill in, select the (Value, RLAmt) with the highest priority (largest 2494 // number of groups), and start with this rotated value. 2495 if ((!NeedMask || LateMask) && !Res) { 2496 // If we have both Repl32 groups and non-Repl32 groups, the non-Repl32 2497 // groups will come first, and so the VRI representing the largest number 2498 // of groups might not be first (it might be the first Repl32 groups). 2499 unsigned MaxGroupsIdx = 0; 2500 if (!ValueRotsVec[0].Repl32) { 2501 for (unsigned i = 0, ie = ValueRotsVec.size(); i < ie; ++i) 2502 if (ValueRotsVec[i].Repl32) { 2503 if (ValueRotsVec[i].NumGroups > ValueRotsVec[0].NumGroups) 2504 MaxGroupsIdx = i; 2505 break; 2506 } 2507 } 2508 2509 ValueRotInfo &VRI = ValueRotsVec[MaxGroupsIdx]; 2510 bool NeedsRotate = false; 2511 if (VRI.RLAmt) { 2512 NeedsRotate = true; 2513 } else if (VRI.Repl32) { 2514 for (auto &BG : BitGroups) { 2515 if (BG.V != VRI.V || BG.RLAmt != VRI.RLAmt || 2516 BG.Repl32 != VRI.Repl32) 2517 continue; 2518 2519 // We don't need a rotate if the bit group is confined to the lower 2520 // 32 bits. 2521 if (BG.StartIdx < 32 && BG.EndIdx < 32 && BG.StartIdx < BG.EndIdx) 2522 continue; 2523 2524 NeedsRotate = true; 2525 break; 2526 } 2527 } 2528 2529 if (NeedsRotate) 2530 Res = SelectRotMask64(VRI.V, dl, VRI.RLAmt, VRI.Repl32, 2531 VRI.Repl32 ? 31 : 0, VRI.Repl32 ? 30 : 63, 2532 InstCnt); 2533 else 2534 Res = VRI.V; 2535 2536 // Now, remove all groups with this underlying value and rotation factor. 
2537 if (Res) 2538 eraseMatchingBitGroups([VRI](const BitGroup &BG) { 2539 return BG.V == VRI.V && BG.RLAmt == VRI.RLAmt && 2540 BG.Repl32 == VRI.Repl32; 2541 }); 2542 } 2543 2544 // Because 64-bit rotates are more flexible than inserts, we might have a 2545 // preference regarding which one we do first (to save one instruction). 2546 if (!Res) 2547 for (auto I = BitGroups.begin(), IE = BitGroups.end(); I != IE; ++I) { 2548 if (SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx, 2549 false) < 2550 SelectRotMask64Count(I->RLAmt, I->Repl32, I->StartIdx, I->EndIdx, 2551 true)) { 2552 if (I != BitGroups.begin()) { 2553 BitGroup BG = *I; 2554 BitGroups.erase(I); 2555 BitGroups.insert(BitGroups.begin(), BG); 2556 } 2557 2558 break; 2559 } 2560 } 2561 2562 // Insert the other groups (one at a time). 2563 for (auto &BG : BitGroups) { 2564 if (!Res) 2565 Res = SelectRotMask64(BG.V, dl, BG.RLAmt, BG.Repl32, BG.StartIdx, 2566 BG.EndIdx, InstCnt); 2567 else 2568 Res = SelectRotMaskIns64(Res, BG.V, dl, BG.RLAmt, BG.Repl32, 2569 BG.StartIdx, BG.EndIdx, InstCnt); 2570 } 2571 2572 if (LateMask) { 2573 uint64_t Mask = getZerosMask(); 2574 2575 // We can use the 32-bit andi/andis technique if the mask does not 2576 // require any higher-order bits. This can save an instruction compared 2577 // to always using the general 64-bit technique. 2578 bool Use32BitInsts = isUInt<32>(Mask); 2579 // Compute the masks for andi/andis that would be necessary. 2580 unsigned ANDIMask = (Mask & UINT16_MAX), 2581 ANDISMask = (Mask >> 16) & UINT16_MAX; 2582 2583 if (Use32BitInsts) { 2584 assert((ANDIMask != 0 || ANDISMask != 0) && 2585 "No set bits in mask when using 32-bit ands for 64-bit value"); 2586 2587 if (InstCnt) *InstCnt += (unsigned) (ANDIMask != 0) + 2588 (unsigned) (ANDISMask != 0) + 2589 (unsigned) (ANDIMask != 0 && ANDISMask != 0); 2590 2591 SDValue ANDIVal, ANDISVal; 2592 if (ANDIMask != 0) 2593 ANDIVal = SDValue(CurDAG->getMachineNode(PPC::ANDI8_rec, dl, MVT::i64, 2594 ExtendToInt64(Res, dl), 2595 getI32Imm(ANDIMask, dl)), 2596 0); 2597 if (ANDISMask != 0) 2598 ANDISVal = 2599 SDValue(CurDAG->getMachineNode(PPC::ANDIS8_rec, dl, MVT::i64, 2600 ExtendToInt64(Res, dl), 2601 getI32Imm(ANDISMask, dl)), 2602 0); 2603 2604 if (!ANDIVal) 2605 Res = ANDISVal; 2606 else if (!ANDISVal) 2607 Res = ANDIVal; 2608 else 2609 Res = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 2610 ExtendToInt64(ANDIVal, dl), ANDISVal), 0); 2611 } else { 2612 unsigned NumOfSelectInsts = 0; 2613 SDValue MaskVal = 2614 SDValue(selectI64Imm(CurDAG, dl, Mask, &NumOfSelectInsts), 0); 2615 Res = SDValue(CurDAG->getMachineNode(PPC::AND8, dl, MVT::i64, 2616 ExtendToInt64(Res, dl), MaskVal), 2617 0); 2618 if (InstCnt) 2619 *InstCnt += NumOfSelectInsts + /* and */ 1; 2620 } 2621 } 2622 2623 return Res.getNode(); 2624 } 2625 2626 SDNode *Select(SDNode *N, bool LateMask, unsigned *InstCnt = nullptr) { 2627 // Fill in BitGroups. 2628 collectBitGroups(LateMask); 2629 if (BitGroups.empty()) 2630 return nullptr; 2631 2632 // For 64-bit values, figure out when we can use 32-bit instructions. 2633 if (Bits.size() == 64) 2634 assignRepl32BitGroups(); 2635 2636 // Fill in ValueRotsVec. 
2637 collectValueRotInfo(); 2638 2639 if (Bits.size() == 32) { 2640 return Select32(N, LateMask, InstCnt); 2641 } else { 2642 assert(Bits.size() == 64 && "Not 64 bits here?"); 2643 return Select64(N, LateMask, InstCnt); 2644 } 2645 2646 return nullptr; 2647 } 2648 2649 void eraseMatchingBitGroups(function_ref<bool(const BitGroup &)> F) { 2650 erase_if(BitGroups, F); 2651 } 2652 2653 SmallVector<ValueBit, 64> Bits; 2654 2655 bool NeedMask = false; 2656 SmallVector<unsigned, 64> RLAmt; 2657 2658 SmallVector<BitGroup, 16> BitGroups; 2659 2660 DenseMap<std::pair<SDValue, unsigned>, ValueRotInfo> ValueRots; 2661 SmallVector<ValueRotInfo, 16> ValueRotsVec; 2662 2663 SelectionDAG *CurDAG = nullptr; 2664 2665 public: 2666 BitPermutationSelector(SelectionDAG *DAG) 2667 : CurDAG(DAG) {} 2668 2669 // Here we try to match complex bit permutations into a set of 2670 // rotate-and-shift/shift/and/or instructions, using a set of heuristics 2671 // known to produce optimal code for common cases (like i32 byte swapping). 2672 SDNode *Select(SDNode *N) { 2673 Memoizer.clear(); 2674 auto Result = 2675 getValueBits(SDValue(N, 0), N->getValueType(0).getSizeInBits()); 2676 if (!Result.first) 2677 return nullptr; 2678 Bits = std::move(*Result.second); 2679 2680 LLVM_DEBUG(dbgs() << "Considering bit-permutation-based instruction" 2681 " selection for: "); 2682 LLVM_DEBUG(N->dump(CurDAG)); 2683 2684 // Fill it RLAmt and set NeedMask. 2685 computeRotationAmounts(); 2686 2687 if (!NeedMask) 2688 return Select(N, false); 2689 2690 // We currently have two techniques for handling results with zeros: early 2691 // masking (the default) and late masking. Late masking is sometimes more 2692 // efficient, but because the structure of the bit groups is different, it 2693 // is hard to tell without generating both and comparing the results. With 2694 // late masking, we ignore zeros in the resulting value when inserting each 2695 // set of bit groups, and then mask in the zeros at the end. With early 2696 // masking, we only insert the non-zero parts of the result at every step. 2697 2698 unsigned InstCnt = 0, InstCntLateMask = 0; 2699 LLVM_DEBUG(dbgs() << "\tEarly masking:\n"); 2700 SDNode *RN = Select(N, false, &InstCnt); 2701 LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCnt << " instructions\n"); 2702 2703 LLVM_DEBUG(dbgs() << "\tLate masking:\n"); 2704 SDNode *RNLM = Select(N, true, &InstCntLateMask); 2705 LLVM_DEBUG(dbgs() << "\t\tisel would use " << InstCntLateMask 2706 << " instructions\n"); 2707 2708 if (InstCnt <= InstCntLateMask) { 2709 LLVM_DEBUG(dbgs() << "\tUsing early-masking for isel\n"); 2710 return RN; 2711 } 2712 2713 LLVM_DEBUG(dbgs() << "\tUsing late-masking for isel\n"); 2714 return RNLM; 2715 } 2716 }; 2717 2718 class IntegerCompareEliminator { 2719 SelectionDAG *CurDAG; 2720 PPCDAGToDAGISel *S; 2721 // Conversion type for interpreting results of a 32-bit instruction as 2722 // a 64-bit value or vice versa. 2723 enum ExtOrTruncConversion { Ext, Trunc }; 2724 2725 // Modifiers to guide how an ISD::SETCC node's result is to be computed 2726 // in a GPR. 2727 // ZExtOrig - use the original condition code, zero-extend value 2728 // ZExtInvert - invert the condition code, zero-extend value 2729 // SExtOrig - use the original condition code, sign-extend value 2730 // SExtInvert - invert the condition code, sign-extend value 2731 enum SetccInGPROpts { ZExtOrig, ZExtInvert, SExtOrig, SExtInvert }; 2732 2733 // Comparisons against zero to emit GPR code sequences for. 
Each of these 2734 // sequences may need to be emitted for two or more equivalent patterns. 2735 // For example (a >= 0) == (a > -1). The direction of the comparison (</>) 2736 // matters as well as the extension type: sext (-1/0), zext (1/0). 2737 // GEZExt - (zext (LHS >= 0)) 2738 // GESExt - (sext (LHS >= 0)) 2739 // LEZExt - (zext (LHS <= 0)) 2740 // LESExt - (sext (LHS <= 0)) 2741 enum ZeroCompare { GEZExt, GESExt, LEZExt, LESExt }; 2742 2743 SDNode *tryEXTEND(SDNode *N); 2744 SDNode *tryLogicOpOfCompares(SDNode *N); 2745 SDValue computeLogicOpInGPR(SDValue LogicOp); 2746 SDValue signExtendInputIfNeeded(SDValue Input); 2747 SDValue zeroExtendInputIfNeeded(SDValue Input); 2748 SDValue addExtOrTrunc(SDValue NatWidthRes, ExtOrTruncConversion Conv); 2749 SDValue getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl, 2750 ZeroCompare CmpTy); 2751 SDValue get32BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2752 int64_t RHSValue, SDLoc dl); 2753 SDValue get32BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2754 int64_t RHSValue, SDLoc dl); 2755 SDValue get64BitZExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2756 int64_t RHSValue, SDLoc dl); 2757 SDValue get64BitSExtCompare(SDValue LHS, SDValue RHS, ISD::CondCode CC, 2758 int64_t RHSValue, SDLoc dl); 2759 SDValue getSETCCInGPR(SDValue Compare, SetccInGPROpts ConvOpts); 2760 2761 public: 2762 IntegerCompareEliminator(SelectionDAG *DAG, 2763 PPCDAGToDAGISel *Sel) : CurDAG(DAG), S(Sel) { 2764 assert(CurDAG->getTargetLoweringInfo() 2765 .getPointerTy(CurDAG->getDataLayout()).getSizeInBits() == 64 && 2766 "Only expecting to use this on 64 bit targets."); 2767 } 2768 SDNode *Select(SDNode *N) { 2769 if (CmpInGPR == ICGPR_None) 2770 return nullptr; 2771 switch (N->getOpcode()) { 2772 default: break; 2773 case ISD::ZERO_EXTEND: 2774 if (CmpInGPR == ICGPR_Sext || CmpInGPR == ICGPR_SextI32 || 2775 CmpInGPR == ICGPR_SextI64) 2776 return nullptr; 2777 LLVM_FALLTHROUGH; 2778 case ISD::SIGN_EXTEND: 2779 if (CmpInGPR == ICGPR_Zext || CmpInGPR == ICGPR_ZextI32 || 2780 CmpInGPR == ICGPR_ZextI64) 2781 return nullptr; 2782 return tryEXTEND(N); 2783 case ISD::AND: 2784 case ISD::OR: 2785 case ISD::XOR: 2786 return tryLogicOpOfCompares(N); 2787 } 2788 return nullptr; 2789 } 2790 }; 2791 2792 static bool isLogicOp(unsigned Opc) { 2793 return Opc == ISD::AND || Opc == ISD::OR || Opc == ISD::XOR; 2794 } 2795 // The obvious case for wanting to keep the value in a GPR. Namely, the 2796 // result of the comparison is actually needed in a GPR. 2797 SDNode *IntegerCompareEliminator::tryEXTEND(SDNode *N) { 2798 assert((N->getOpcode() == ISD::ZERO_EXTEND || 2799 N->getOpcode() == ISD::SIGN_EXTEND) && 2800 "Expecting a zero/sign extend node!"); 2801 SDValue WideRes; 2802 // If we are zero-extending the result of a logical operation on i1 2803 // values, we can keep the values in GPRs. 2804 if (isLogicOp(N->getOperand(0).getOpcode()) && 2805 N->getOperand(0).getValueType() == MVT::i1 && 2806 N->getOpcode() == ISD::ZERO_EXTEND) 2807 WideRes = computeLogicOpInGPR(N->getOperand(0)); 2808 else if (N->getOperand(0).getOpcode() != ISD::SETCC) 2809 return nullptr; 2810 else 2811 WideRes = 2812 getSETCCInGPR(N->getOperand(0), 2813 N->getOpcode() == ISD::SIGN_EXTEND ? 
2814 SetccInGPROpts::SExtOrig : SetccInGPROpts::ZExtOrig); 2815 2816 if (!WideRes) 2817 return nullptr; 2818 2819 SDLoc dl(N); 2820 bool Input32Bit = WideRes.getValueType() == MVT::i32; 2821 bool Output32Bit = N->getValueType(0) == MVT::i32; 2822 2823 NumSextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 1 : 0; 2824 NumZextSetcc += N->getOpcode() == ISD::SIGN_EXTEND ? 0 : 1; 2825 2826 SDValue ConvOp = WideRes; 2827 if (Input32Bit != Output32Bit) 2828 ConvOp = addExtOrTrunc(WideRes, Input32Bit ? ExtOrTruncConversion::Ext : 2829 ExtOrTruncConversion::Trunc); 2830 return ConvOp.getNode(); 2831 } 2832 2833 // Attempt to perform logical operations on the results of comparisons while 2834 // keeping the values in GPRs. Without doing so, these would end up being 2835 // lowered to CR-logical operations which suffer from significant latency and 2836 // low ILP. 2837 SDNode *IntegerCompareEliminator::tryLogicOpOfCompares(SDNode *N) { 2838 if (N->getValueType(0) != MVT::i1) 2839 return nullptr; 2840 assert(isLogicOp(N->getOpcode()) && 2841 "Expected a logic operation on setcc results."); 2842 SDValue LoweredLogical = computeLogicOpInGPR(SDValue(N, 0)); 2843 if (!LoweredLogical) 2844 return nullptr; 2845 2846 SDLoc dl(N); 2847 bool IsBitwiseNegate = LoweredLogical.getMachineOpcode() == PPC::XORI8; 2848 unsigned SubRegToExtract = IsBitwiseNegate ? PPC::sub_eq : PPC::sub_gt; 2849 SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32); 2850 SDValue LHS = LoweredLogical.getOperand(0); 2851 SDValue RHS = LoweredLogical.getOperand(1); 2852 SDValue WideOp; 2853 SDValue OpToConvToRecForm; 2854 2855 // Look through any 32-bit to 64-bit implicit extend nodes to find the 2856 // opcode that is input to the XORI. 2857 if (IsBitwiseNegate && 2858 LoweredLogical.getOperand(0).getMachineOpcode() == PPC::INSERT_SUBREG) 2859 OpToConvToRecForm = LoweredLogical.getOperand(0).getOperand(1); 2860 else if (IsBitwiseNegate) 2861 // If the input to the XORI isn't an extension, that's what we're after. 2862 OpToConvToRecForm = LoweredLogical.getOperand(0); 2863 else 2864 // If this is not an XORI, it is a reg-reg logical op and we can convert 2865 // it to record-form. 2866 OpToConvToRecForm = LoweredLogical; 2867 2868 // Get the record-form version of the node we're looking to use to get the 2869 // CR result from. 2870 uint16_t NonRecOpc = OpToConvToRecForm.getMachineOpcode(); 2871 int NewOpc = PPCInstrInfo::getRecordFormOpcode(NonRecOpc); 2872 2873 // Convert the right node to record-form. This is either the logical we're 2874 // looking at or it is the input node to the negation (if we're looking at 2875 // a bitwise negation). 2876 if (NewOpc != -1 && IsBitwiseNegate) { 2877 // The input to the XORI has a record-form. Use it. 2878 assert(LoweredLogical.getConstantOperandVal(1) == 1 && 2879 "Expected a PPC::XORI8 only for bitwise negation."); 2880 // Emit the record-form instruction. 2881 std::vector<SDValue> Ops; 2882 for (int i = 0, e = OpToConvToRecForm.getNumOperands(); i < e; i++) 2883 Ops.push_back(OpToConvToRecForm.getOperand(i)); 2884 2885 WideOp = 2886 SDValue(CurDAG->getMachineNode(NewOpc, dl, 2887 OpToConvToRecForm.getValueType(), 2888 MVT::Glue, Ops), 0); 2889 } else { 2890 assert((NewOpc != -1 || !IsBitwiseNegate) && 2891 "No record form available for AND8/OR8/XOR8?"); 2892 WideOp = 2893 SDValue(CurDAG->getMachineNode(NewOpc == -1 ? 
PPC::ANDI8_rec : NewOpc, 2894 dl, MVT::i64, MVT::Glue, LHS, RHS), 2895 0); 2896 } 2897 2898 // Select this node to a single bit from CR0 set by the record-form node 2899 // just created. For bitwise negation, use the EQ bit which is the equivalent 2900 // of negating the result (i.e. it is a bit set when the result of the 2901 // operation is zero). 2902 SDValue SRIdxVal = 2903 CurDAG->getTargetConstant(SubRegToExtract, dl, MVT::i32); 2904 SDValue CRBit = 2905 SDValue(CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, 2906 MVT::i1, CR0Reg, SRIdxVal, 2907 WideOp.getValue(1)), 0); 2908 return CRBit.getNode(); 2909 } 2910 2911 // Lower a logical operation on i1 values into a GPR sequence if possible. 2912 // The result can be kept in a GPR if requested. 2913 // Three types of inputs can be handled: 2914 // - SETCC 2915 // - TRUNCATE 2916 // - Logical operation (AND/OR/XOR) 2917 // There is also a special case that is handled (namely a complement operation 2918 // achieved with xor %a, -1). 2919 SDValue IntegerCompareEliminator::computeLogicOpInGPR(SDValue LogicOp) { 2920 assert(isLogicOp(LogicOp.getOpcode()) && 2921 "Can only handle logic operations here."); 2922 assert(LogicOp.getValueType() == MVT::i1 && 2923 "Can only handle logic operations on i1 values here."); 2924 SDLoc dl(LogicOp); 2925 SDValue LHS, RHS; 2926 2927 // Special case: xor %a, -1 2928 bool IsBitwiseNegation = isBitwiseNot(LogicOp); 2929 2930 // Produces a GPR sequence for each operand of the binary logic operation. 2931 // For SETCC, it produces the respective comparison, for TRUNCATE it truncates 2932 // the value in a GPR and for logic operations, it will recursively produce 2933 // a GPR sequence for the operation. 2934 auto getLogicOperand = [&] (SDValue Operand) -> SDValue { 2935 unsigned OperandOpcode = Operand.getOpcode(); 2936 if (OperandOpcode == ISD::SETCC) 2937 return getSETCCInGPR(Operand, SetccInGPROpts::ZExtOrig); 2938 else if (OperandOpcode == ISD::TRUNCATE) { 2939 SDValue InputOp = Operand.getOperand(0); 2940 EVT InVT = InputOp.getValueType(); 2941 return SDValue(CurDAG->getMachineNode(InVT == MVT::i32 ? PPC::RLDICL_32 : 2942 PPC::RLDICL, dl, InVT, InputOp, 2943 S->getI64Imm(0, dl), 2944 S->getI64Imm(63, dl)), 0); 2945 } else if (isLogicOp(OperandOpcode)) 2946 return computeLogicOpInGPR(Operand); 2947 return SDValue(); 2948 }; 2949 LHS = getLogicOperand(LogicOp.getOperand(0)); 2950 RHS = getLogicOperand(LogicOp.getOperand(1)); 2951 2952 // If a GPR sequence can't be produced for the LHS we can't proceed. 2953 // Not producing a GPR sequence for the RHS is only a problem if this isn't 2954 // a bitwise negation operation. 2955 if (!LHS || (!RHS && !IsBitwiseNegation)) 2956 return SDValue(); 2957 2958 NumLogicOpsOnComparison++; 2959 2960 // We will use the inputs as 64-bit values. 
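  // Illustrative example: for (xor i1 %c, -1) where %c is a SETCC, LHS is the
  // GPR sequence computed for %c and the node becomes XORI8(LHS, 1), flipping
  // only bit 0; for a plain (and i1 %a, %b) both operands are widened to i64
  // below and an AND8 is emitted.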
2961 if (LHS.getValueType() == MVT::i32) 2962 LHS = addExtOrTrunc(LHS, ExtOrTruncConversion::Ext); 2963 if (!IsBitwiseNegation && RHS.getValueType() == MVT::i32) 2964 RHS = addExtOrTrunc(RHS, ExtOrTruncConversion::Ext); 2965 2966 unsigned NewOpc; 2967 switch (LogicOp.getOpcode()) { 2968 default: llvm_unreachable("Unknown logic operation."); 2969 case ISD::AND: NewOpc = PPC::AND8; break; 2970 case ISD::OR: NewOpc = PPC::OR8; break; 2971 case ISD::XOR: NewOpc = PPC::XOR8; break; 2972 } 2973 2974 if (IsBitwiseNegation) { 2975 RHS = S->getI64Imm(1, dl); 2976 NewOpc = PPC::XORI8; 2977 } 2978 2979 return SDValue(CurDAG->getMachineNode(NewOpc, dl, MVT::i64, LHS, RHS), 0); 2980 2981 } 2982 2983 /// If the value isn't guaranteed to be sign-extended to 64-bits, extend it. 2984 /// Otherwise just reinterpret it as a 64-bit value. 2985 /// Useful when emitting comparison code for 32-bit values without using 2986 /// the compare instruction (which only considers the lower 32-bits). 2987 SDValue IntegerCompareEliminator::signExtendInputIfNeeded(SDValue Input) { 2988 assert(Input.getValueType() == MVT::i32 && 2989 "Can only sign-extend 32-bit values here."); 2990 unsigned Opc = Input.getOpcode(); 2991 2992 // The value was sign extended and then truncated to 32-bits. No need to 2993 // sign extend it again. 2994 if (Opc == ISD::TRUNCATE && 2995 (Input.getOperand(0).getOpcode() == ISD::AssertSext || 2996 Input.getOperand(0).getOpcode() == ISD::SIGN_EXTEND)) 2997 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 2998 2999 LoadSDNode *InputLoad = dyn_cast<LoadSDNode>(Input); 3000 // The input is a sign-extending load. All ppc sign-extending loads 3001 // sign-extend to the full 64-bits. 3002 if (InputLoad && InputLoad->getExtensionType() == ISD::SEXTLOAD) 3003 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3004 3005 ConstantSDNode *InputConst = dyn_cast<ConstantSDNode>(Input); 3006 // We don't sign-extend constants. 3007 if (InputConst) 3008 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3009 3010 SDLoc dl(Input); 3011 SignExtensionsAdded++; 3012 return SDValue(CurDAG->getMachineNode(PPC::EXTSW_32_64, dl, 3013 MVT::i64, Input), 0); 3014 } 3015 3016 /// If the value isn't guaranteed to be zero-extended to 64-bits, extend it. 3017 /// Otherwise just reinterpret it as a 64-bit value. 3018 /// Useful when emitting comparison code for 32-bit values without using 3019 /// the compare instruction (which only considers the lower 32-bits). 3020 SDValue IntegerCompareEliminator::zeroExtendInputIfNeeded(SDValue Input) { 3021 assert(Input.getValueType() == MVT::i32 && 3022 "Can only zero-extend 32-bit values here."); 3023 unsigned Opc = Input.getOpcode(); 3024 3025 // The only condition under which we can omit the actual extend instruction: 3026 // - The value is a positive constant 3027 // - The value comes from a load that isn't a sign-extending load 3028 // An ISD::TRUNCATE needs to be zero-extended unless it is fed by a zext. 
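// When none of those cases apply, the zero extension emitted below is a single
// clrldi (RLDICL_32_64 with a shift of 0 and a mask beginning at bit 32), which
// clears the high 32 bits of the register.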
3029 bool IsTruncateOfZExt = Opc == ISD::TRUNCATE && 3030 (Input.getOperand(0).getOpcode() == ISD::AssertZext || 3031 Input.getOperand(0).getOpcode() == ISD::ZERO_EXTEND); 3032 if (IsTruncateOfZExt) 3033 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3034 3035 ConstantSDNode *InputConst = dyn_cast<ConstantSDNode>(Input); 3036 if (InputConst && InputConst->getSExtValue() >= 0) 3037 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3038 3039 LoadSDNode *InputLoad = dyn_cast<LoadSDNode>(Input); 3040 // The input is a load that doesn't sign-extend (it will be zero-extended). 3041 if (InputLoad && InputLoad->getExtensionType() != ISD::SEXTLOAD) 3042 return addExtOrTrunc(Input, ExtOrTruncConversion::Ext); 3043 3044 // None of the above, need to zero-extend. 3045 SDLoc dl(Input); 3046 ZeroExtensionsAdded++; 3047 return SDValue(CurDAG->getMachineNode(PPC::RLDICL_32_64, dl, MVT::i64, Input, 3048 S->getI64Imm(0, dl), 3049 S->getI64Imm(32, dl)), 0); 3050 } 3051 3052 // Handle a 32-bit value in a 64-bit register and vice-versa. These are of 3053 // course not actual zero/sign extensions that will generate machine code, 3054 // they're just a way to reinterpret a 32 bit value in a register as a 3055 // 64 bit value and vice-versa. 3056 SDValue IntegerCompareEliminator::addExtOrTrunc(SDValue NatWidthRes, 3057 ExtOrTruncConversion Conv) { 3058 SDLoc dl(NatWidthRes); 3059 3060 // For reinterpreting 32-bit values as 64 bit values, we generate 3061 // INSERT_SUBREG IMPLICIT_DEF:i64, <input>, TargetConstant:i32<1> 3062 if (Conv == ExtOrTruncConversion::Ext) { 3063 SDValue ImDef(CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, MVT::i64), 0); 3064 SDValue SubRegIdx = 3065 CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 3066 return SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, MVT::i64, 3067 ImDef, NatWidthRes, SubRegIdx), 0); 3068 } 3069 3070 assert(Conv == ExtOrTruncConversion::Trunc && 3071 "Unknown convertion between 32 and 64 bit values."); 3072 // For reinterpreting 64-bit values as 32-bit values, we just need to 3073 // EXTRACT_SUBREG (i.e. extract the low word). 3074 SDValue SubRegIdx = 3075 CurDAG->getTargetConstant(PPC::sub_32, dl, MVT::i32); 3076 return SDValue(CurDAG->getMachineNode(PPC::EXTRACT_SUBREG, dl, MVT::i32, 3077 NatWidthRes, SubRegIdx), 0); 3078 } 3079 3080 // Produce a GPR sequence for compound comparisons (<=, >=) against zero. 3081 // Handle both zero-extensions and sign-extensions. 3082 SDValue 3083 IntegerCompareEliminator::getCompoundZeroComparisonInGPR(SDValue LHS, SDLoc dl, 3084 ZeroCompare CmpTy) { 3085 EVT InVT = LHS.getValueType(); 3086 bool Is32Bit = InVT == MVT::i32; 3087 SDValue ToExtend; 3088 3089 // Produce the value that needs to be either zero or sign extended. 3090 switch (CmpTy) { 3091 case ZeroCompare::GEZExt: 3092 case ZeroCompare::GESExt: 3093 ToExtend = SDValue(CurDAG->getMachineNode(Is32Bit ? PPC::NOR : PPC::NOR8, 3094 dl, InVT, LHS, LHS), 0); 3095 break; 3096 case ZeroCompare::LEZExt: 3097 case ZeroCompare::LESExt: { 3098 if (Is32Bit) { 3099 // Upper 32 bits cannot be undefined for this sequence. 
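// With %a sign-extended, (neg %a) has its sign bit set exactly when %a > 0, so
// the srdi-by-63 (RLDICL 1, 63) below produces (%a > 0); the final switch then
// flips that with xori 1 (LEZExt) or adds -1 (LESExt) to form the <= 0 result.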
3100 LHS = signExtendInputIfNeeded(LHS); 3101 SDValue Neg = 3102 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0); 3103 ToExtend = 3104 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3105 Neg, S->getI64Imm(1, dl), 3106 S->getI64Imm(63, dl)), 0); 3107 } else { 3108 SDValue Addi = 3109 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS, 3110 S->getI64Imm(~0ULL, dl)), 0); 3111 ToExtend = SDValue(CurDAG->getMachineNode(PPC::OR8, dl, MVT::i64, 3112 Addi, LHS), 0); 3113 } 3114 break; 3115 } 3116 } 3117 3118 // For 64-bit sequences, the extensions are the same for the GE/LE cases. 3119 if (!Is32Bit && 3120 (CmpTy == ZeroCompare::GEZExt || CmpTy == ZeroCompare::LEZExt)) 3121 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3122 ToExtend, S->getI64Imm(1, dl), 3123 S->getI64Imm(63, dl)), 0); 3124 if (!Is32Bit && 3125 (CmpTy == ZeroCompare::GESExt || CmpTy == ZeroCompare::LESExt)) 3126 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, ToExtend, 3127 S->getI64Imm(63, dl)), 0); 3128 3129 assert(Is32Bit && "Should have handled the 32-bit sequences above."); 3130 // For 32-bit sequences, the extensions differ between GE/LE cases. 3131 switch (CmpTy) { 3132 case ZeroCompare::GEZExt: { 3133 SDValue ShiftOps[] = { ToExtend, S->getI32Imm(1, dl), S->getI32Imm(31, dl), 3134 S->getI32Imm(31, dl) }; 3135 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 3136 ShiftOps), 0); 3137 } 3138 case ZeroCompare::GESExt: 3139 return SDValue(CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, ToExtend, 3140 S->getI32Imm(31, dl)), 0); 3141 case ZeroCompare::LEZExt: 3142 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, ToExtend, 3143 S->getI32Imm(1, dl)), 0); 3144 case ZeroCompare::LESExt: 3145 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, ToExtend, 3146 S->getI32Imm(-1, dl)), 0); 3147 } 3148 3149 // The above case covers all the enumerators so it can't have a default clause 3150 // to avoid compiler warnings. 3151 llvm_unreachable("Unknown zero-comparison type."); 3152 } 3153 3154 /// Produces a zero-extended result of comparing two 32-bit values according to 3155 /// the passed condition code. 3156 SDValue 3157 IntegerCompareEliminator::get32BitZExtCompare(SDValue LHS, SDValue RHS, 3158 ISD::CondCode CC, 3159 int64_t RHSValue, SDLoc dl) { 3160 if (CmpInGPR == ICGPR_I64 || CmpInGPR == ICGPR_SextI64 || 3161 CmpInGPR == ICGPR_ZextI64 || CmpInGPR == ICGPR_Sext) 3162 return SDValue(); 3163 bool IsRHSZero = RHSValue == 0; 3164 bool IsRHSOne = RHSValue == 1; 3165 bool IsRHSNegOne = RHSValue == -1LL; 3166 switch (CC) { 3167 default: return SDValue(); 3168 case ISD::SETEQ: { 3169 // (zext (setcc %a, %b, seteq)) -> (lshr (cntlzw (xor %a, %b)), 5) 3170 // (zext (setcc %a, 0, seteq)) -> (lshr (cntlzw %a), 5) 3171 SDValue Xor = IsRHSZero ? LHS : 3172 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3173 SDValue Clz = 3174 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0); 3175 SDValue ShiftOps[] = { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), 3176 S->getI32Imm(31, dl) }; 3177 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 3178 ShiftOps), 0); 3179 } 3180 case ISD::SETNE: { 3181 // (zext (setcc %a, %b, setne)) -> (xor (lshr (cntlzw (xor %a, %b)), 5), 1) 3182 // (zext (setcc %a, 0, setne)) -> (xor (lshr (cntlzw %a), 5), 1) 3183 SDValue Xor = IsRHSZero ? 
LHS : 3184 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3185 SDValue Clz = 3186 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0); 3187 SDValue ShiftOps[] = { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), 3188 S->getI32Imm(31, dl) }; 3189 SDValue Shift = 3190 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, ShiftOps), 0); 3191 return SDValue(CurDAG->getMachineNode(PPC::XORI, dl, MVT::i32, Shift, 3192 S->getI32Imm(1, dl)), 0); 3193 } 3194 case ISD::SETGE: { 3195 // (zext (setcc %a, %b, setge)) -> (xor (lshr (sub %a, %b), 63), 1) 3196 // (zext (setcc %a, 0, setge)) -> (lshr (~ %a), 31) 3197 if(IsRHSZero) 3198 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3199 3200 // Not a special case (i.e. RHS == 0). Handle (%a >= %b) as (%b <= %a) 3201 // by swapping inputs and falling through. 3202 std::swap(LHS, RHS); 3203 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3204 IsRHSZero = RHSConst && RHSConst->isZero(); 3205 LLVM_FALLTHROUGH; 3206 } 3207 case ISD::SETLE: { 3208 if (CmpInGPR == ICGPR_NonExtIn) 3209 return SDValue(); 3210 // (zext (setcc %a, %b, setle)) -> (xor (lshr (sub %b, %a), 63), 1) 3211 // (zext (setcc %a, 0, setle)) -> (xor (lshr (- %a), 63), 1) 3212 if(IsRHSZero) { 3213 if (CmpInGPR == ICGPR_NonExtIn) 3214 return SDValue(); 3215 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3216 } 3217 3218 // The upper 32-bits of the register can't be undefined for this sequence. 3219 LHS = signExtendInputIfNeeded(LHS); 3220 RHS = signExtendInputIfNeeded(RHS); 3221 SDValue Sub = 3222 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0); 3223 SDValue Shift = 3224 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Sub, 3225 S->getI64Imm(1, dl), S->getI64Imm(63, dl)), 3226 0); 3227 return 3228 SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, 3229 MVT::i64, Shift, S->getI32Imm(1, dl)), 0); 3230 } 3231 case ISD::SETGT: { 3232 // (zext (setcc %a, %b, setgt)) -> (lshr (sub %b, %a), 63) 3233 // (zext (setcc %a, -1, setgt)) -> (lshr (~ %a), 31) 3234 // (zext (setcc %a, 0, setgt)) -> (lshr (- %a), 63) 3235 // Handle SETLT -1 (which is equivalent to SETGE 0). 3236 if (IsRHSNegOne) 3237 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3238 3239 if (IsRHSZero) { 3240 if (CmpInGPR == ICGPR_NonExtIn) 3241 return SDValue(); 3242 // The upper 32-bits of the register can't be undefined for this sequence. 3243 LHS = signExtendInputIfNeeded(LHS); 3244 RHS = signExtendInputIfNeeded(RHS); 3245 SDValue Neg = 3246 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0); 3247 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3248 Neg, S->getI32Imm(1, dl), S->getI32Imm(63, dl)), 0); 3249 } 3250 // Not a special case (i.e. RHS == 0 or RHS == -1). Handle (%a > %b) as 3251 // (%b < %a) by swapping inputs and falling through. 3252 std::swap(LHS, RHS); 3253 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3254 IsRHSZero = RHSConst && RHSConst->isZero(); 3255 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1; 3256 LLVM_FALLTHROUGH; 3257 } 3258 case ISD::SETLT: { 3259 // (zext (setcc %a, %b, setlt)) -> (lshr (sub %a, %b), 63) 3260 // (zext (setcc %a, 1, setlt)) -> (xor (lshr (- %a), 63), 1) 3261 // (zext (setcc %a, 0, setlt)) -> (lshr %a, 31) 3262 // Handle SETLT 1 (which is equivalent to SETLE 0). 
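// (The ICGPR_NonExtIn checks in this block bail out because these sequences
// need sign-extended inputs, i.e. they would introduce exactly the extensions
// that mode is meant to avoid.)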
3263 if (IsRHSOne) { 3264 if (CmpInGPR == ICGPR_NonExtIn) 3265 return SDValue(); 3266 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3267 } 3268 3269 if (IsRHSZero) { 3270 SDValue ShiftOps[] = { LHS, S->getI32Imm(1, dl), S->getI32Imm(31, dl), 3271 S->getI32Imm(31, dl) }; 3272 return SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, 3273 ShiftOps), 0); 3274 } 3275 3276 if (CmpInGPR == ICGPR_NonExtIn) 3277 return SDValue(); 3278 // The upper 32-bits of the register can't be undefined for this sequence. 3279 LHS = signExtendInputIfNeeded(LHS); 3280 RHS = signExtendInputIfNeeded(RHS); 3281 SDValue SUBFNode = 3282 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0); 3283 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3284 SUBFNode, S->getI64Imm(1, dl), 3285 S->getI64Imm(63, dl)), 0); 3286 } 3287 case ISD::SETUGE: 3288 // (zext (setcc %a, %b, setuge)) -> (xor (lshr (sub %b, %a), 63), 1) 3289 // (zext (setcc %a, %b, setule)) -> (xor (lshr (sub %a, %b), 63), 1) 3290 std::swap(LHS, RHS); 3291 LLVM_FALLTHROUGH; 3292 case ISD::SETULE: { 3293 if (CmpInGPR == ICGPR_NonExtIn) 3294 return SDValue(); 3295 // The upper 32-bits of the register can't be undefined for this sequence. 3296 LHS = zeroExtendInputIfNeeded(LHS); 3297 RHS = zeroExtendInputIfNeeded(RHS); 3298 SDValue Subtract = 3299 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0); 3300 SDValue SrdiNode = 3301 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3302 Subtract, S->getI64Imm(1, dl), 3303 S->getI64Imm(63, dl)), 0); 3304 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, SrdiNode, 3305 S->getI32Imm(1, dl)), 0); 3306 } 3307 case ISD::SETUGT: 3308 // (zext (setcc %a, %b, setugt)) -> (lshr (sub %b, %a), 63) 3309 // (zext (setcc %a, %b, setult)) -> (lshr (sub %a, %b), 63) 3310 std::swap(LHS, RHS); 3311 LLVM_FALLTHROUGH; 3312 case ISD::SETULT: { 3313 if (CmpInGPR == ICGPR_NonExtIn) 3314 return SDValue(); 3315 // The upper 32-bits of the register can't be undefined for this sequence. 3316 LHS = zeroExtendInputIfNeeded(LHS); 3317 RHS = zeroExtendInputIfNeeded(RHS); 3318 SDValue Subtract = 3319 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0); 3320 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3321 Subtract, S->getI64Imm(1, dl), 3322 S->getI64Imm(63, dl)), 0); 3323 } 3324 } 3325 } 3326 3327 /// Produces a sign-extended result of comparing two 32-bit values according to 3328 /// the passed condition code. 3329 SDValue 3330 IntegerCompareEliminator::get32BitSExtCompare(SDValue LHS, SDValue RHS, 3331 ISD::CondCode CC, 3332 int64_t RHSValue, SDLoc dl) { 3333 if (CmpInGPR == ICGPR_I64 || CmpInGPR == ICGPR_SextI64 || 3334 CmpInGPR == ICGPR_ZextI64 || CmpInGPR == ICGPR_Zext) 3335 return SDValue(); 3336 bool IsRHSZero = RHSValue == 0; 3337 bool IsRHSOne = RHSValue == 1; 3338 bool IsRHSNegOne = RHSValue == -1LL; 3339 3340 switch (CC) { 3341 default: return SDValue(); 3342 case ISD::SETEQ: { 3343 // (sext (setcc %a, %b, seteq)) -> 3344 // (ashr (shl (ctlz (xor %a, %b)), 58), 63) 3345 // (sext (setcc %a, 0, seteq)) -> 3346 // (ashr (shl (ctlz %a), 58), 63) 3347 SDValue CountInput = IsRHSZero ? 
LHS : 3348 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3349 SDValue Cntlzw = 3350 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, CountInput), 0); 3351 SDValue SHLOps[] = { Cntlzw, S->getI32Imm(27, dl), 3352 S->getI32Imm(5, dl), S->getI32Imm(31, dl) }; 3353 SDValue Slwi = 3354 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, SHLOps), 0); 3355 return SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Slwi), 0); 3356 } 3357 case ISD::SETNE: { 3358 // Bitwise xor the operands, count leading zeros, shift right by 5 bits and 3359 // flip the bit, finally take 2's complement. 3360 // (sext (setcc %a, %b, setne)) -> 3361 // (neg (xor (lshr (ctlz (xor %a, %b)), 5), 1)) 3362 // Same as above, but the first xor is not needed. 3363 // (sext (setcc %a, 0, setne)) -> 3364 // (neg (xor (lshr (ctlz %a), 5), 1)) 3365 SDValue Xor = IsRHSZero ? LHS : 3366 SDValue(CurDAG->getMachineNode(PPC::XOR, dl, MVT::i32, LHS, RHS), 0); 3367 SDValue Clz = 3368 SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Xor), 0); 3369 SDValue ShiftOps[] = 3370 { Clz, S->getI32Imm(27, dl), S->getI32Imm(5, dl), S->getI32Imm(31, dl) }; 3371 SDValue Shift = 3372 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, ShiftOps), 0); 3373 SDValue Xori = 3374 SDValue(CurDAG->getMachineNode(PPC::XORI, dl, MVT::i32, Shift, 3375 S->getI32Imm(1, dl)), 0); 3376 return SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Xori), 0); 3377 } 3378 case ISD::SETGE: { 3379 // (sext (setcc %a, %b, setge)) -> (add (lshr (sub %a, %b), 63), -1) 3380 // (sext (setcc %a, 0, setge)) -> (ashr (~ %a), 31) 3381 if (IsRHSZero) 3382 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt); 3383 3384 // Not a special case (i.e. RHS == 0). Handle (%a >= %b) as (%b <= %a) 3385 // by swapping inputs and falling through. 3386 std::swap(LHS, RHS); 3387 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3388 IsRHSZero = RHSConst && RHSConst->isZero(); 3389 LLVM_FALLTHROUGH; 3390 } 3391 case ISD::SETLE: { 3392 if (CmpInGPR == ICGPR_NonExtIn) 3393 return SDValue(); 3394 // (sext (setcc %a, %b, setge)) -> (add (lshr (sub %b, %a), 63), -1) 3395 // (sext (setcc %a, 0, setle)) -> (add (lshr (- %a), 63), -1) 3396 if (IsRHSZero) 3397 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt); 3398 3399 // The upper 32-bits of the register can't be undefined for this sequence. 3400 LHS = signExtendInputIfNeeded(LHS); 3401 RHS = signExtendInputIfNeeded(RHS); 3402 SDValue SUBFNode = 3403 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, MVT::Glue, 3404 LHS, RHS), 0); 3405 SDValue Srdi = 3406 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3407 SUBFNode, S->getI64Imm(1, dl), 3408 S->getI64Imm(63, dl)), 0); 3409 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, Srdi, 3410 S->getI32Imm(-1, dl)), 0); 3411 } 3412 case ISD::SETGT: { 3413 // (sext (setcc %a, %b, setgt)) -> (ashr (sub %b, %a), 63) 3414 // (sext (setcc %a, -1, setgt)) -> (ashr (~ %a), 31) 3415 // (sext (setcc %a, 0, setgt)) -> (ashr (- %a), 63) 3416 if (IsRHSNegOne) 3417 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt); 3418 if (IsRHSZero) { 3419 if (CmpInGPR == ICGPR_NonExtIn) 3420 return SDValue(); 3421 // The upper 32-bits of the register can't be undefined for this sequence. 
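// (sradi 63 of (neg %a) broadcasts the sign bit of the negation: the result is
// -1 exactly when %a > 0 and 0 otherwise, which is the sign-extended i1 value
// of the comparison.)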
3422 LHS = signExtendInputIfNeeded(LHS); 3423 RHS = signExtendInputIfNeeded(RHS); 3424 SDValue Neg = 3425 SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, LHS), 0); 3426 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, Neg, 3427 S->getI64Imm(63, dl)), 0); 3428 } 3429 // Not a special case (i.e. RHS == 0 or RHS == -1). Handle (%a > %b) as 3430 // (%b < %a) by swapping inputs and falling through. 3431 std::swap(LHS, RHS); 3432 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3433 IsRHSZero = RHSConst && RHSConst->isZero(); 3434 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1; 3435 LLVM_FALLTHROUGH; 3436 } 3437 case ISD::SETLT: { 3438 // (sext (setcc %a, %b, setlt)) -> (ashr (sub %a, %b), 63) 3439 // (sext (setcc %a, 1, setlt)) -> (add (lshr (- %a), 63), -1) 3440 // (sext (setcc %a, 0, setlt)) -> (ashr %a, 31) 3441 if (IsRHSOne) { 3442 if (CmpInGPR == ICGPR_NonExtIn) 3443 return SDValue(); 3444 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt); 3445 } 3446 if (IsRHSZero) 3447 return SDValue(CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, LHS, 3448 S->getI32Imm(31, dl)), 0); 3449 3450 if (CmpInGPR == ICGPR_NonExtIn) 3451 return SDValue(); 3452 // The upper 32-bits of the register can't be undefined for this sequence. 3453 LHS = signExtendInputIfNeeded(LHS); 3454 RHS = signExtendInputIfNeeded(RHS); 3455 SDValue SUBFNode = 3456 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0); 3457 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, 3458 SUBFNode, S->getI64Imm(63, dl)), 0); 3459 } 3460 case ISD::SETUGE: 3461 // (sext (setcc %a, %b, setuge)) -> (add (lshr (sub %a, %b), 63), -1) 3462 // (sext (setcc %a, %b, setule)) -> (add (lshr (sub %b, %a), 63), -1) 3463 std::swap(LHS, RHS); 3464 LLVM_FALLTHROUGH; 3465 case ISD::SETULE: { 3466 if (CmpInGPR == ICGPR_NonExtIn) 3467 return SDValue(); 3468 // The upper 32-bits of the register can't be undefined for this sequence. 3469 LHS = zeroExtendInputIfNeeded(LHS); 3470 RHS = zeroExtendInputIfNeeded(RHS); 3471 SDValue Subtract = 3472 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, LHS, RHS), 0); 3473 SDValue Shift = 3474 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Subtract, 3475 S->getI32Imm(1, dl), S->getI32Imm(63,dl)), 3476 0); 3477 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, Shift, 3478 S->getI32Imm(-1, dl)), 0); 3479 } 3480 case ISD::SETUGT: 3481 // (sext (setcc %a, %b, setugt)) -> (ashr (sub %b, %a), 63) 3482 // (sext (setcc %a, %b, setult)) -> (ashr (sub %a, %b), 63) 3483 std::swap(LHS, RHS); 3484 LLVM_FALLTHROUGH; 3485 case ISD::SETULT: { 3486 if (CmpInGPR == ICGPR_NonExtIn) 3487 return SDValue(); 3488 // The upper 32-bits of the register can't be undefined for this sequence. 3489 LHS = zeroExtendInputIfNeeded(LHS); 3490 RHS = zeroExtendInputIfNeeded(RHS); 3491 SDValue Subtract = 3492 SDValue(CurDAG->getMachineNode(PPC::SUBF8, dl, MVT::i64, RHS, LHS), 0); 3493 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, 3494 Subtract, S->getI64Imm(63, dl)), 0); 3495 } 3496 } 3497 } 3498 3499 /// Produces a sign-extended result of comparing two 64-bit values according to 3500 /// the passed condition code.
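/// For example, equality with an arbitrary RHS becomes a cntlzd of the xor of
/// the operands followed by a right shift of 6: the count is 64 (and the shift
/// therefore yields 1) only when the xor is zero.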
3501 SDValue 3502 IntegerCompareEliminator::get64BitZExtCompare(SDValue LHS, SDValue RHS, 3503 ISD::CondCode CC, 3504 int64_t RHSValue, SDLoc dl) { 3505 if (CmpInGPR == ICGPR_I32 || CmpInGPR == ICGPR_SextI32 || 3506 CmpInGPR == ICGPR_ZextI32 || CmpInGPR == ICGPR_Sext) 3507 return SDValue(); 3508 bool IsRHSZero = RHSValue == 0; 3509 bool IsRHSOne = RHSValue == 1; 3510 bool IsRHSNegOne = RHSValue == -1LL; 3511 switch (CC) { 3512 default: return SDValue(); 3513 case ISD::SETEQ: { 3514 // (zext (setcc %a, %b, seteq)) -> (lshr (ctlz (xor %a, %b)), 6) 3515 // (zext (setcc %a, 0, seteq)) -> (lshr (ctlz %a), 6) 3516 SDValue Xor = IsRHSZero ? LHS : 3517 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0); 3518 SDValue Clz = 3519 SDValue(CurDAG->getMachineNode(PPC::CNTLZD, dl, MVT::i64, Xor), 0); 3520 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Clz, 3521 S->getI64Imm(58, dl), 3522 S->getI64Imm(63, dl)), 0); 3523 } 3524 case ISD::SETNE: { 3525 // {addc.reg, addc.CA} = (addcarry (xor %a, %b), -1) 3526 // (zext (setcc %a, %b, setne)) -> (sube addc.reg, addc.reg, addc.CA) 3527 // {addcz.reg, addcz.CA} = (addcarry %a, -1) 3528 // (zext (setcc %a, 0, setne)) -> (sube addcz.reg, addcz.reg, addcz.CA) 3529 SDValue Xor = IsRHSZero ? LHS : 3530 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0); 3531 SDValue AC = 3532 SDValue(CurDAG->getMachineNode(PPC::ADDIC8, dl, MVT::i64, MVT::Glue, 3533 Xor, S->getI32Imm(~0U, dl)), 0); 3534 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, AC, 3535 Xor, AC.getValue(1)), 0); 3536 } 3537 case ISD::SETGE: { 3538 // {subc.reg, subc.CA} = (subcarry %a, %b) 3539 // (zext (setcc %a, %b, setge)) -> 3540 // (adde (lshr %b, 63), (ashr %a, 63), subc.CA) 3541 // (zext (setcc %a, 0, setge)) -> (lshr (~ %a), 63) 3542 if (IsRHSZero) 3543 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3544 std::swap(LHS, RHS); 3545 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3546 IsRHSZero = RHSConst && RHSConst->isZero(); 3547 LLVM_FALLTHROUGH; 3548 } 3549 case ISD::SETLE: { 3550 // {subc.reg, subc.CA} = (subcarry %b, %a) 3551 // (zext (setcc %a, %b, setge)) -> 3552 // (adde (lshr %a, 63), (ashr %b, 63), subc.CA) 3553 // (zext (setcc %a, 0, setge)) -> (lshr (or %a, (add %a, -1)), 63) 3554 if (IsRHSZero) 3555 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3556 SDValue ShiftL = 3557 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS, 3558 S->getI64Imm(1, dl), 3559 S->getI64Imm(63, dl)), 0); 3560 SDValue ShiftR = 3561 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, RHS, 3562 S->getI64Imm(63, dl)), 0); 3563 SDValue SubtractCarry = 3564 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3565 LHS, RHS), 1); 3566 return SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue, 3567 ShiftR, ShiftL, SubtractCarry), 0); 3568 } 3569 case ISD::SETGT: { 3570 // {subc.reg, subc.CA} = (subcarry %b, %a) 3571 // (zext (setcc %a, %b, setgt)) -> 3572 // (xor (adde (lshr %a, 63), (ashr %b, 63), subc.CA), 1) 3573 // (zext (setcc %a, 0, setgt)) -> (lshr (nor (add %a, -1), %a), 63) 3574 if (IsRHSNegOne) 3575 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GEZExt); 3576 if (IsRHSZero) { 3577 SDValue Addi = 3578 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS, 3579 S->getI64Imm(~0ULL, dl)), 0); 3580 SDValue Nor = 3581 SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, Addi, LHS), 0); 3582 return 
SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, Nor, 3583 S->getI64Imm(1, dl), 3584 S->getI64Imm(63, dl)), 0); 3585 } 3586 std::swap(LHS, RHS); 3587 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3588 IsRHSZero = RHSConst && RHSConst->isZero(); 3589 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1; 3590 LLVM_FALLTHROUGH; 3591 } 3592 case ISD::SETLT: { 3593 // {subc.reg, subc.CA} = (subcarry %a, %b) 3594 // (zext (setcc %a, %b, setlt)) -> 3595 // (xor (adde (lshr %b, 63), (ashr %a, 63), subc.CA), 1) 3596 // (zext (setcc %a, 0, setlt)) -> (lshr %a, 63) 3597 if (IsRHSOne) 3598 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LEZExt); 3599 if (IsRHSZero) 3600 return SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS, 3601 S->getI64Imm(1, dl), 3602 S->getI64Imm(63, dl)), 0); 3603 SDValue SRADINode = 3604 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, 3605 LHS, S->getI64Imm(63, dl)), 0); 3606 SDValue SRDINode = 3607 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3608 RHS, S->getI64Imm(1, dl), 3609 S->getI64Imm(63, dl)), 0); 3610 SDValue SUBFC8Carry = 3611 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3612 RHS, LHS), 1); 3613 SDValue ADDE8Node = 3614 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue, 3615 SRDINode, SRADINode, SUBFC8Carry), 0); 3616 return SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, 3617 ADDE8Node, S->getI64Imm(1, dl)), 0); 3618 } 3619 case ISD::SETUGE: 3620 // {subc.reg, subc.CA} = (subcarry %a, %b) 3621 // (zext (setcc %a, %b, setuge)) -> (add (sube %b, %b, subc.CA), 1) 3622 std::swap(LHS, RHS); 3623 LLVM_FALLTHROUGH; 3624 case ISD::SETULE: { 3625 // {subc.reg, subc.CA} = (subcarry %b, %a) 3626 // (zext (setcc %a, %b, setule)) -> (add (sube %a, %a, subc.CA), 1) 3627 SDValue SUBFC8Carry = 3628 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3629 LHS, RHS), 1); 3630 SDValue SUBFE8Node = 3631 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, MVT::Glue, 3632 LHS, LHS, SUBFC8Carry), 0); 3633 return SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, 3634 SUBFE8Node, S->getI64Imm(1, dl)), 0); 3635 } 3636 case ISD::SETUGT: 3637 // {subc.reg, subc.CA} = (subcarry %b, %a) 3638 // (zext (setcc %a, %b, setugt)) -> -(sube %b, %b, subc.CA) 3639 std::swap(LHS, RHS); 3640 LLVM_FALLTHROUGH; 3641 case ISD::SETULT: { 3642 // {subc.reg, subc.CA} = (subcarry %a, %b) 3643 // (zext (setcc %a, %b, setult)) -> -(sube %a, %a, subc.CA) 3644 SDValue SubtractCarry = 3645 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3646 RHS, LHS), 1); 3647 SDValue ExtSub = 3648 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, 3649 LHS, LHS, SubtractCarry), 0); 3650 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, 3651 ExtSub), 0); 3652 } 3653 } 3654 } 3655 3656 /// Produces a sign-extended result of comparing two 64-bit values according to 3657 /// the passed condition code. 
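/// Most of these sequences use the carry bit. For SETEQ below, for instance,
/// addic of -1 to (xor %a, %b) sets the carry unless the xor is zero, and the
/// following subfe of that result with itself yields CA - 1, i.e. -1 exactly
/// when the operands are equal.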
3658 SDValue 3659 IntegerCompareEliminator::get64BitSExtCompare(SDValue LHS, SDValue RHS, 3660 ISD::CondCode CC, 3661 int64_t RHSValue, SDLoc dl) { 3662 if (CmpInGPR == ICGPR_I32 || CmpInGPR == ICGPR_SextI32 || 3663 CmpInGPR == ICGPR_ZextI32 || CmpInGPR == ICGPR_Zext) 3664 return SDValue(); 3665 bool IsRHSZero = RHSValue == 0; 3666 bool IsRHSOne = RHSValue == 1; 3667 bool IsRHSNegOne = RHSValue == -1LL; 3668 switch (CC) { 3669 default: return SDValue(); 3670 case ISD::SETEQ: { 3671 // {addc.reg, addc.CA} = (addcarry (xor %a, %b), -1) 3672 // (sext (setcc %a, %b, seteq)) -> (sube addc.reg, addc.reg, addc.CA) 3673 // {addcz.reg, addcz.CA} = (addcarry %a, -1) 3674 // (sext (setcc %a, 0, seteq)) -> (sube addcz.reg, addcz.reg, addcz.CA) 3675 SDValue AddInput = IsRHSZero ? LHS : 3676 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0); 3677 SDValue Addic = 3678 SDValue(CurDAG->getMachineNode(PPC::ADDIC8, dl, MVT::i64, MVT::Glue, 3679 AddInput, S->getI32Imm(~0U, dl)), 0); 3680 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, Addic, 3681 Addic, Addic.getValue(1)), 0); 3682 } 3683 case ISD::SETNE: { 3684 // {subfc.reg, subfc.CA} = (subcarry 0, (xor %a, %b)) 3685 // (sext (setcc %a, %b, setne)) -> (sube subfc.reg, subfc.reg, subfc.CA) 3686 // {subfcz.reg, subfcz.CA} = (subcarry 0, %a) 3687 // (sext (setcc %a, 0, setne)) -> (sube subfcz.reg, subfcz.reg, subfcz.CA) 3688 SDValue Xor = IsRHSZero ? LHS : 3689 SDValue(CurDAG->getMachineNode(PPC::XOR8, dl, MVT::i64, LHS, RHS), 0); 3690 SDValue SC = 3691 SDValue(CurDAG->getMachineNode(PPC::SUBFIC8, dl, MVT::i64, MVT::Glue, 3692 Xor, S->getI32Imm(0, dl)), 0); 3693 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, SC, 3694 SC, SC.getValue(1)), 0); 3695 } 3696 case ISD::SETGE: { 3697 // {subc.reg, subc.CA} = (subcarry %a, %b) 3698 // (sext (setcc %a, %b, setge)) -> 3699 // (- (adde (lshr %b, 63), (ashr %a, 63), subc.CA)) 3700 // (sext (setcc %a, 0, setge)) -> (~ (ashr %a, 63)) 3701 if (IsRHSZero) 3702 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt); 3703 std::swap(LHS, RHS); 3704 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3705 IsRHSZero = RHSConst && RHSConst->isZero(); 3706 LLVM_FALLTHROUGH; 3707 } 3708 case ISD::SETLE: { 3709 // {subc.reg, subc.CA} = (subcarry %b, %a) 3710 // (sext (setcc %a, %b, setle)) -> 3711 // (- (adde (lshr %a, 63), (ashr %b, 63), subc.CA)) 3712 // (sext (setcc %a, 0, setle)) -> (ashr (or %a, (add %a, -1)), 63) 3713 if (IsRHSZero) 3714 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt); 3715 SDValue ShiftR = 3716 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, RHS, 3717 S->getI64Imm(63, dl)), 0); 3718 SDValue ShiftL = 3719 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, LHS, 3720 S->getI64Imm(1, dl), 3721 S->getI64Imm(63, dl)), 0); 3722 SDValue SubtractCarry = 3723 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3724 LHS, RHS), 1); 3725 SDValue Adde = 3726 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, MVT::Glue, 3727 ShiftR, ShiftL, SubtractCarry), 0); 3728 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, Adde), 0); 3729 } 3730 case ISD::SETGT: { 3731 // {subc.reg, subc.CA} = (subcarry %b, %a) 3732 // (sext (setcc %a, %b, setgt)) -> 3733 // -(xor (adde (lshr %a, 63), (ashr %b, 63), subc.CA), 1) 3734 // (sext (setcc %a, 0, setgt)) -> (ashr (nor (add %a, -1), %a), 63) 3735 if (IsRHSNegOne) 3736 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::GESExt); 3737 if
(IsRHSZero) { 3738 SDValue Add = 3739 SDValue(CurDAG->getMachineNode(PPC::ADDI8, dl, MVT::i64, LHS, 3740 S->getI64Imm(-1, dl)), 0); 3741 SDValue Nor = 3742 SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, Add, LHS), 0); 3743 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, Nor, 3744 S->getI64Imm(63, dl)), 0); 3745 } 3746 std::swap(LHS, RHS); 3747 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3748 IsRHSZero = RHSConst && RHSConst->isZero(); 3749 IsRHSOne = RHSConst && RHSConst->getSExtValue() == 1; 3750 LLVM_FALLTHROUGH; 3751 } 3752 case ISD::SETLT: { 3753 // {subc.reg, subc.CA} = (subcarry %a, %b) 3754 // (zext (setcc %a, %b, setlt)) -> 3755 // -(xor (adde (lshr %b, 63), (ashr %a, 63), subc.CA), 1) 3756 // (zext (setcc %a, 0, setlt)) -> (ashr %a, 63) 3757 if (IsRHSOne) 3758 return getCompoundZeroComparisonInGPR(LHS, dl, ZeroCompare::LESExt); 3759 if (IsRHSZero) { 3760 return SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, LHS, 3761 S->getI64Imm(63, dl)), 0); 3762 } 3763 SDValue SRADINode = 3764 SDValue(CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, 3765 LHS, S->getI64Imm(63, dl)), 0); 3766 SDValue SRDINode = 3767 SDValue(CurDAG->getMachineNode(PPC::RLDICL, dl, MVT::i64, 3768 RHS, S->getI64Imm(1, dl), 3769 S->getI64Imm(63, dl)), 0); 3770 SDValue SUBFC8Carry = 3771 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3772 RHS, LHS), 1); 3773 SDValue ADDE8Node = 3774 SDValue(CurDAG->getMachineNode(PPC::ADDE8, dl, MVT::i64, 3775 SRDINode, SRADINode, SUBFC8Carry), 0); 3776 SDValue XORI8Node = 3777 SDValue(CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, 3778 ADDE8Node, S->getI64Imm(1, dl)), 0); 3779 return SDValue(CurDAG->getMachineNode(PPC::NEG8, dl, MVT::i64, 3780 XORI8Node), 0); 3781 } 3782 case ISD::SETUGE: 3783 // {subc.reg, subc.CA} = (subcarry %a, %b) 3784 // (sext (setcc %a, %b, setuge)) -> ~(sube %b, %b, subc.CA) 3785 std::swap(LHS, RHS); 3786 LLVM_FALLTHROUGH; 3787 case ISD::SETULE: { 3788 // {subc.reg, subc.CA} = (subcarry %b, %a) 3789 // (sext (setcc %a, %b, setule)) -> ~(sube %a, %a, subc.CA) 3790 SDValue SubtractCarry = 3791 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3792 LHS, RHS), 1); 3793 SDValue ExtSub = 3794 SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, MVT::Glue, LHS, 3795 LHS, SubtractCarry), 0); 3796 return SDValue(CurDAG->getMachineNode(PPC::NOR8, dl, MVT::i64, 3797 ExtSub, ExtSub), 0); 3798 } 3799 case ISD::SETUGT: 3800 // {subc.reg, subc.CA} = (subcarry %b, %a) 3801 // (sext (setcc %a, %b, setugt)) -> (sube %b, %b, subc.CA) 3802 std::swap(LHS, RHS); 3803 LLVM_FALLTHROUGH; 3804 case ISD::SETULT: { 3805 // {subc.reg, subc.CA} = (subcarry %a, %b) 3806 // (sext (setcc %a, %b, setult)) -> (sube %a, %a, subc.CA) 3807 SDValue SubCarry = 3808 SDValue(CurDAG->getMachineNode(PPC::SUBFC8, dl, MVT::i64, MVT::Glue, 3809 RHS, LHS), 1); 3810 return SDValue(CurDAG->getMachineNode(PPC::SUBFE8, dl, MVT::i64, 3811 LHS, LHS, SubCarry), 0); 3812 } 3813 } 3814 } 3815 3816 /// Do all uses of this SDValue need the result in a GPR? 3817 /// This is meant to be used on values that have type i1 since 3818 /// it is somewhat meaningless to ask if values of other types 3819 /// should be kept in GPR's. 3820 static bool allUsesExtend(SDValue Compare, SelectionDAG *CurDAG) { 3821 assert(Compare.getOpcode() == ISD::SETCC && 3822 "An ISD::SETCC node required here."); 3823 3824 // For values that have a single use, the caller should obviously already have 3825 // checked if that use is an extending use. 
We check the other uses here. 3826 if (Compare.hasOneUse()) 3827 return true; 3828 // We want the value in a GPR if it is being extended, used for a select, or 3829 // used in logical operations. 3830 for (auto CompareUse : Compare.getNode()->uses()) 3831 if (CompareUse->getOpcode() != ISD::SIGN_EXTEND && 3832 CompareUse->getOpcode() != ISD::ZERO_EXTEND && 3833 CompareUse->getOpcode() != ISD::SELECT && 3834 !isLogicOp(CompareUse->getOpcode())) { 3835 OmittedForNonExtendUses++; 3836 return false; 3837 } 3838 return true; 3839 } 3840 3841 /// Returns an equivalent of a SETCC node but with the result the same width as 3842 /// the inputs. This can also be used for SELECT_CC if either the true or false 3843 /// values is a power of two while the other is zero. 3844 SDValue IntegerCompareEliminator::getSETCCInGPR(SDValue Compare, 3845 SetccInGPROpts ConvOpts) { 3846 assert((Compare.getOpcode() == ISD::SETCC || 3847 Compare.getOpcode() == ISD::SELECT_CC) && 3848 "An ISD::SETCC node required here."); 3849 3850 // Don't convert this comparison to a GPR sequence because there are uses 3851 // of the i1 result (i.e. uses that require the result in the CR). 3852 if ((Compare.getOpcode() == ISD::SETCC) && !allUsesExtend(Compare, CurDAG)) 3853 return SDValue(); 3854 3855 SDValue LHS = Compare.getOperand(0); 3856 SDValue RHS = Compare.getOperand(1); 3857 3858 // The condition code is operand 2 for SETCC and operand 4 for SELECT_CC. 3859 int CCOpNum = Compare.getOpcode() == ISD::SELECT_CC ? 4 : 2; 3860 ISD::CondCode CC = 3861 cast<CondCodeSDNode>(Compare.getOperand(CCOpNum))->get(); 3862 EVT InputVT = LHS.getValueType(); 3863 if (InputVT != MVT::i32 && InputVT != MVT::i64) 3864 return SDValue(); 3865 3866 if (ConvOpts == SetccInGPROpts::ZExtInvert || 3867 ConvOpts == SetccInGPROpts::SExtInvert) 3868 CC = ISD::getSetCCInverse(CC, InputVT); 3869 3870 bool Inputs32Bit = InputVT == MVT::i32; 3871 3872 SDLoc dl(Compare); 3873 ConstantSDNode *RHSConst = dyn_cast<ConstantSDNode>(RHS); 3874 int64_t RHSValue = RHSConst ? RHSConst->getSExtValue() : INT64_MAX; 3875 bool IsSext = ConvOpts == SetccInGPROpts::SExtOrig || 3876 ConvOpts == SetccInGPROpts::SExtInvert; 3877 3878 if (IsSext && Inputs32Bit) 3879 return get32BitSExtCompare(LHS, RHS, CC, RHSValue, dl); 3880 else if (Inputs32Bit) 3881 return get32BitZExtCompare(LHS, RHS, CC, RHSValue, dl); 3882 else if (IsSext) 3883 return get64BitSExtCompare(LHS, RHS, CC, RHSValue, dl); 3884 return get64BitZExtCompare(LHS, RHS, CC, RHSValue, dl); 3885 } 3886 3887 } // end anonymous namespace 3888 3889 bool PPCDAGToDAGISel::tryIntCompareInGPR(SDNode *N) { 3890 if (N->getValueType(0) != MVT::i32 && 3891 N->getValueType(0) != MVT::i64) 3892 return false; 3893 3894 // This optimization will emit code that assumes 64-bit registers 3895 // so we don't want to run it in 32-bit mode. Also don't run it 3896 // on functions that are not to be optimized. 3897 if (TM.getOptLevel() == CodeGenOpt::None || !TM.isPPC64()) 3898 return false; 3899 3900 // For POWER10, it is more profitable to use the set boolean extension 3901 // instructions rather than the integer compare elimination codegen. 3902 // Users can override this via the command line option, `--ppc-gpr-icmps`. 
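// (CmpInGPR.getNumOccurrences() > 0 means the option was given explicitly, so
// we only bail out on ISA 3.1 targets when the user has not asked for a
// specific GPR-icmp mode.)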
3903 if (!(CmpInGPR.getNumOccurrences() > 0) && Subtarget->isISA3_1()) 3904 return false; 3905 3906 switch (N->getOpcode()) { 3907 default: break; 3908 case ISD::ZERO_EXTEND: 3909 case ISD::SIGN_EXTEND: 3910 case ISD::AND: 3911 case ISD::OR: 3912 case ISD::XOR: { 3913 IntegerCompareEliminator ICmpElim(CurDAG, this); 3914 if (SDNode *New = ICmpElim.Select(N)) { 3915 ReplaceNode(N, New); 3916 return true; 3917 } 3918 } 3919 } 3920 return false; 3921 } 3922 3923 bool PPCDAGToDAGISel::tryBitPermutation(SDNode *N) { 3924 if (N->getValueType(0) != MVT::i32 && 3925 N->getValueType(0) != MVT::i64) 3926 return false; 3927 3928 if (!UseBitPermRewriter) 3929 return false; 3930 3931 switch (N->getOpcode()) { 3932 default: break; 3933 case ISD::ROTL: 3934 case ISD::SHL: 3935 case ISD::SRL: 3936 case ISD::AND: 3937 case ISD::OR: { 3938 BitPermutationSelector BPS(CurDAG); 3939 if (SDNode *New = BPS.Select(N)) { 3940 ReplaceNode(N, New); 3941 return true; 3942 } 3943 return false; 3944 } 3945 } 3946 3947 return false; 3948 } 3949 3950 /// SelectCC - Select a comparison of the specified values with the specified 3951 /// condition code, returning the CR# of the expression. 3952 SDValue PPCDAGToDAGISel::SelectCC(SDValue LHS, SDValue RHS, ISD::CondCode CC, 3953 const SDLoc &dl, SDValue Chain) { 3954 // Always select the LHS. 3955 unsigned Opc; 3956 3957 if (LHS.getValueType() == MVT::i32) { 3958 unsigned Imm; 3959 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 3960 if (isInt32Immediate(RHS, Imm)) { 3961 // SETEQ/SETNE comparison with 16-bit immediate, fold it. 3962 if (isUInt<16>(Imm)) 3963 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS, 3964 getI32Imm(Imm & 0xFFFF, dl)), 3965 0); 3966 // If this is a 16-bit signed immediate, fold it. 3967 if (isInt<16>((int)Imm)) 3968 return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS, 3969 getI32Imm(Imm & 0xFFFF, dl)), 3970 0); 3971 3972 // For non-equality comparisons, the default code would materialize the 3973 // constant, then compare against it, like this: 3974 // lis r2, 4660 3975 // ori r2, r2, 22136 3976 // cmpw cr0, r3, r2 3977 // Since we are just comparing for equality, we can emit this instead: 3978 // xoris r0,r3,0x1234 3979 // cmplwi cr0,r0,0x5678 3980 // beq cr0,L6 3981 SDValue Xor(CurDAG->getMachineNode(PPC::XORIS, dl, MVT::i32, LHS, 3982 getI32Imm(Imm >> 16, dl)), 0); 3983 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, Xor, 3984 getI32Imm(Imm & 0xFFFF, dl)), 0); 3985 } 3986 Opc = PPC::CMPLW; 3987 } else if (ISD::isUnsignedIntSetCC(CC)) { 3988 if (isInt32Immediate(RHS, Imm) && isUInt<16>(Imm)) 3989 return SDValue(CurDAG->getMachineNode(PPC::CMPLWI, dl, MVT::i32, LHS, 3990 getI32Imm(Imm & 0xFFFF, dl)), 0); 3991 Opc = PPC::CMPLW; 3992 } else { 3993 int16_t SImm; 3994 if (isIntS16Immediate(RHS, SImm)) 3995 return SDValue(CurDAG->getMachineNode(PPC::CMPWI, dl, MVT::i32, LHS, 3996 getI32Imm((int)SImm & 0xFFFF, 3997 dl)), 3998 0); 3999 Opc = PPC::CMPW; 4000 } 4001 } else if (LHS.getValueType() == MVT::i64) { 4002 uint64_t Imm; 4003 if (CC == ISD::SETEQ || CC == ISD::SETNE) { 4004 if (isInt64Immediate(RHS.getNode(), Imm)) { 4005 // SETEQ/SETNE comparison with 16-bit immediate, fold it. 4006 if (isUInt<16>(Imm)) 4007 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS, 4008 getI32Imm(Imm & 0xFFFF, dl)), 4009 0); 4010 // If this is a 16-bit signed immediate, fold it. 
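// (cmpdi sign-extends its 16-bit immediate, so any value in [-32768, 32767]
// can be encoded directly.)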
4011 if (isInt<16>(Imm)) 4012 return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS, 4013 getI32Imm(Imm & 0xFFFF, dl)), 4014 0); 4015 4016 // For non-equality comparisons, the default code would materialize the 4017 // constant, then compare against it, like this: 4018 // lis r2, 4660 4019 // ori r2, r2, 22136 4020 // cmpd cr0, r3, r2 4021 // Since we are just comparing for equality, we can emit this instead: 4022 // xoris r0,r3,0x1234 4023 // cmpldi cr0,r0,0x5678 4024 // beq cr0,L6 4025 if (isUInt<32>(Imm)) { 4026 SDValue Xor(CurDAG->getMachineNode(PPC::XORIS8, dl, MVT::i64, LHS, 4027 getI64Imm(Imm >> 16, dl)), 0); 4028 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, Xor, 4029 getI64Imm(Imm & 0xFFFF, dl)), 4030 0); 4031 } 4032 } 4033 Opc = PPC::CMPLD; 4034 } else if (ISD::isUnsignedIntSetCC(CC)) { 4035 if (isInt64Immediate(RHS.getNode(), Imm) && isUInt<16>(Imm)) 4036 return SDValue(CurDAG->getMachineNode(PPC::CMPLDI, dl, MVT::i64, LHS, 4037 getI64Imm(Imm & 0xFFFF, dl)), 0); 4038 Opc = PPC::CMPLD; 4039 } else { 4040 int16_t SImm; 4041 if (isIntS16Immediate(RHS, SImm)) 4042 return SDValue(CurDAG->getMachineNode(PPC::CMPDI, dl, MVT::i64, LHS, 4043 getI64Imm(SImm & 0xFFFF, dl)), 4044 0); 4045 Opc = PPC::CMPD; 4046 } 4047 } else if (LHS.getValueType() == MVT::f32) { 4048 if (Subtarget->hasSPE()) { 4049 switch (CC) { 4050 default: 4051 case ISD::SETEQ: 4052 case ISD::SETNE: 4053 Opc = PPC::EFSCMPEQ; 4054 break; 4055 case ISD::SETLT: 4056 case ISD::SETGE: 4057 case ISD::SETOLT: 4058 case ISD::SETOGE: 4059 case ISD::SETULT: 4060 case ISD::SETUGE: 4061 Opc = PPC::EFSCMPLT; 4062 break; 4063 case ISD::SETGT: 4064 case ISD::SETLE: 4065 case ISD::SETOGT: 4066 case ISD::SETOLE: 4067 case ISD::SETUGT: 4068 case ISD::SETULE: 4069 Opc = PPC::EFSCMPGT; 4070 break; 4071 } 4072 } else 4073 Opc = PPC::FCMPUS; 4074 } else if (LHS.getValueType() == MVT::f64) { 4075 if (Subtarget->hasSPE()) { 4076 switch (CC) { 4077 default: 4078 case ISD::SETEQ: 4079 case ISD::SETNE: 4080 Opc = PPC::EFDCMPEQ; 4081 break; 4082 case ISD::SETLT: 4083 case ISD::SETGE: 4084 case ISD::SETOLT: 4085 case ISD::SETOGE: 4086 case ISD::SETULT: 4087 case ISD::SETUGE: 4088 Opc = PPC::EFDCMPLT; 4089 break; 4090 case ISD::SETGT: 4091 case ISD::SETLE: 4092 case ISD::SETOGT: 4093 case ISD::SETOLE: 4094 case ISD::SETUGT: 4095 case ISD::SETULE: 4096 Opc = PPC::EFDCMPGT; 4097 break; 4098 } 4099 } else 4100 Opc = Subtarget->hasVSX() ? PPC::XSCMPUDP : PPC::FCMPUD; 4101 } else { 4102 assert(LHS.getValueType() == MVT::f128 && "Unknown vt!"); 4103 assert(Subtarget->hasP9Vector() && "XSCMPUQP requires Power9 Vector"); 4104 Opc = PPC::XSCMPUQP; 4105 } 4106 if (Chain) 4107 return SDValue( 4108 CurDAG->getMachineNode(Opc, dl, MVT::i32, MVT::Other, LHS, RHS, Chain), 4109 0); 4110 else 4111 return SDValue(CurDAG->getMachineNode(Opc, dl, MVT::i32, LHS, RHS), 0); 4112 } 4113 4114 static PPC::Predicate getPredicateForSetCC(ISD::CondCode CC, const EVT &VT, 4115 const PPCSubtarget *Subtarget) { 4116 // For SPE instructions, the result is in GT bit of the CR 4117 bool UseSPE = Subtarget->hasSPE() && VT.isFloatingPoint(); 4118 4119 switch (CC) { 4120 case ISD::SETUEQ: 4121 case ISD::SETONE: 4122 case ISD::SETOLE: 4123 case ISD::SETOGE: 4124 llvm_unreachable("Should be lowered by legalize!"); 4125 default: llvm_unreachable("Unknown condition!"); 4126 case ISD::SETOEQ: 4127 case ISD::SETEQ: 4128 return UseSPE ? PPC::PRED_GT : PPC::PRED_EQ; 4129 case ISD::SETUNE: 4130 case ISD::SETNE: 4131 return UseSPE ? 
PPC::PRED_LE : PPC::PRED_NE; 4132 case ISD::SETOLT: 4133 case ISD::SETLT: 4134 return UseSPE ? PPC::PRED_GT : PPC::PRED_LT; 4135 case ISD::SETULE: 4136 case ISD::SETLE: 4137 return PPC::PRED_LE; 4138 case ISD::SETOGT: 4139 case ISD::SETGT: 4140 return PPC::PRED_GT; 4141 case ISD::SETUGE: 4142 case ISD::SETGE: 4143 return UseSPE ? PPC::PRED_LE : PPC::PRED_GE; 4144 case ISD::SETO: return PPC::PRED_NU; 4145 case ISD::SETUO: return PPC::PRED_UN; 4146 // These two are invalid for floating point. Assume we have int. 4147 case ISD::SETULT: return PPC::PRED_LT; 4148 case ISD::SETUGT: return PPC::PRED_GT; 4149 } 4150 } 4151 4152 /// getCRIdxForSetCC - Return the index of the condition register field 4153 /// associated with the SetCC condition, and whether or not the field is 4154 /// treated as inverted. That is, lt = 0; ge = 0 inverted. 4155 static unsigned getCRIdxForSetCC(ISD::CondCode CC, bool &Invert) { 4156 Invert = false; 4157 switch (CC) { 4158 default: llvm_unreachable("Unknown condition!"); 4159 case ISD::SETOLT: 4160 case ISD::SETLT: return 0; // Bit #0 = SETOLT 4161 case ISD::SETOGT: 4162 case ISD::SETGT: return 1; // Bit #1 = SETOGT 4163 case ISD::SETOEQ: 4164 case ISD::SETEQ: return 2; // Bit #2 = SETOEQ 4165 case ISD::SETUO: return 3; // Bit #3 = SETUO 4166 case ISD::SETUGE: 4167 case ISD::SETGE: Invert = true; return 0; // !Bit #0 = SETUGE 4168 case ISD::SETULE: 4169 case ISD::SETLE: Invert = true; return 1; // !Bit #1 = SETULE 4170 case ISD::SETUNE: 4171 case ISD::SETNE: Invert = true; return 2; // !Bit #2 = SETUNE 4172 case ISD::SETO: Invert = true; return 3; // !Bit #3 = SETO 4173 case ISD::SETUEQ: 4174 case ISD::SETOGE: 4175 case ISD::SETOLE: 4176 case ISD::SETONE: 4177 llvm_unreachable("Invalid branch code: should be expanded by legalize"); 4178 // These are invalid for floating point. Assume integer. 4179 case ISD::SETULT: return 0; 4180 case ISD::SETUGT: return 1; 4181 } 4182 } 4183 4184 // getVCmpInst: return the vector compare instruction for the specified 4185 // vector type and condition code. Since this is for altivec specific code, 4186 // only support the altivec types (v16i8, v8i16, v4i32, v2i64, v1i128, 4187 // and v4f32). 4188 static unsigned int getVCmpInst(MVT VecVT, ISD::CondCode CC, 4189 bool HasVSX, bool &Swap, bool &Negate) { 4190 Swap = false; 4191 Negate = false; 4192 4193 if (VecVT.isFloatingPoint()) { 4194 /* Handle some cases by swapping input operands. */ 4195 switch (CC) { 4196 case ISD::SETLE: CC = ISD::SETGE; Swap = true; break; 4197 case ISD::SETLT: CC = ISD::SETGT; Swap = true; break; 4198 case ISD::SETOLE: CC = ISD::SETOGE; Swap = true; break; 4199 case ISD::SETOLT: CC = ISD::SETOGT; Swap = true; break; 4200 case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break; 4201 case ISD::SETUGT: CC = ISD::SETULT; Swap = true; break; 4202 default: break; 4203 } 4204 /* Handle some cases by negating the result. */ 4205 switch (CC) { 4206 case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break; 4207 case ISD::SETUNE: CC = ISD::SETOEQ; Negate = true; break; 4208 case ISD::SETULE: CC = ISD::SETOGT; Negate = true; break; 4209 case ISD::SETULT: CC = ISD::SETOGE; Negate = true; break; 4210 default: break; 4211 } 4212 /* We have instructions implementing the remaining cases. */ 4213 switch (CC) { 4214 case ISD::SETEQ: 4215 case ISD::SETOEQ: 4216 if (VecVT == MVT::v4f32) 4217 return HasVSX ? 
PPC::XVCMPEQSP : PPC::VCMPEQFP; 4218 else if (VecVT == MVT::v2f64) 4219 return PPC::XVCMPEQDP; 4220 break; 4221 case ISD::SETGT: 4222 case ISD::SETOGT: 4223 if (VecVT == MVT::v4f32) 4224 return HasVSX ? PPC::XVCMPGTSP : PPC::VCMPGTFP; 4225 else if (VecVT == MVT::v2f64) 4226 return PPC::XVCMPGTDP; 4227 break; 4228 case ISD::SETGE: 4229 case ISD::SETOGE: 4230 if (VecVT == MVT::v4f32) 4231 return HasVSX ? PPC::XVCMPGESP : PPC::VCMPGEFP; 4232 else if (VecVT == MVT::v2f64) 4233 return PPC::XVCMPGEDP; 4234 break; 4235 default: 4236 break; 4237 } 4238 llvm_unreachable("Invalid floating-point vector compare condition"); 4239 } else { 4240 /* Handle some cases by swapping input operands. */ 4241 switch (CC) { 4242 case ISD::SETGE: CC = ISD::SETLE; Swap = true; break; 4243 case ISD::SETLT: CC = ISD::SETGT; Swap = true; break; 4244 case ISD::SETUGE: CC = ISD::SETULE; Swap = true; break; 4245 case ISD::SETULT: CC = ISD::SETUGT; Swap = true; break; 4246 default: break; 4247 } 4248 /* Handle some cases by negating the result. */ 4249 switch (CC) { 4250 case ISD::SETNE: CC = ISD::SETEQ; Negate = true; break; 4251 case ISD::SETUNE: CC = ISD::SETUEQ; Negate = true; break; 4252 case ISD::SETLE: CC = ISD::SETGT; Negate = true; break; 4253 case ISD::SETULE: CC = ISD::SETUGT; Negate = true; break; 4254 default: break; 4255 } 4256 /* We have instructions implementing the remaining cases. */ 4257 switch (CC) { 4258 case ISD::SETEQ: 4259 case ISD::SETUEQ: 4260 if (VecVT == MVT::v16i8) 4261 return PPC::VCMPEQUB; 4262 else if (VecVT == MVT::v8i16) 4263 return PPC::VCMPEQUH; 4264 else if (VecVT == MVT::v4i32) 4265 return PPC::VCMPEQUW; 4266 else if (VecVT == MVT::v2i64) 4267 return PPC::VCMPEQUD; 4268 else if (VecVT == MVT::v1i128) 4269 return PPC::VCMPEQUQ; 4270 break; 4271 case ISD::SETGT: 4272 if (VecVT == MVT::v16i8) 4273 return PPC::VCMPGTSB; 4274 else if (VecVT == MVT::v8i16) 4275 return PPC::VCMPGTSH; 4276 else if (VecVT == MVT::v4i32) 4277 return PPC::VCMPGTSW; 4278 else if (VecVT == MVT::v2i64) 4279 return PPC::VCMPGTSD; 4280 else if (VecVT == MVT::v1i128) 4281 return PPC::VCMPGTSQ; 4282 break; 4283 case ISD::SETUGT: 4284 if (VecVT == MVT::v16i8) 4285 return PPC::VCMPGTUB; 4286 else if (VecVT == MVT::v8i16) 4287 return PPC::VCMPGTUH; 4288 else if (VecVT == MVT::v4i32) 4289 return PPC::VCMPGTUW; 4290 else if (VecVT == MVT::v2i64) 4291 return PPC::VCMPGTUD; 4292 else if (VecVT == MVT::v1i128) 4293 return PPC::VCMPGTUQ; 4294 break; 4295 default: 4296 break; 4297 } 4298 llvm_unreachable("Invalid integer vector compare condition"); 4299 } 4300 } 4301 4302 bool PPCDAGToDAGISel::trySETCC(SDNode *N) { 4303 SDLoc dl(N); 4304 unsigned Imm; 4305 bool IsStrict = N->isStrictFPOpcode(); 4306 ISD::CondCode CC = 4307 cast<CondCodeSDNode>(N->getOperand(IsStrict ? 3 : 2))->get(); 4308 EVT PtrVT = 4309 CurDAG->getTargetLoweringInfo().getPointerTy(CurDAG->getDataLayout()); 4310 bool isPPC64 = (PtrVT == MVT::i64); 4311 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue(); 4312 4313 SDValue LHS = N->getOperand(IsStrict ? 1 : 0); 4314 SDValue RHS = N->getOperand(IsStrict ? 2 : 1); 4315 4316 if (!IsStrict && !Subtarget->useCRBits() && isInt32Immediate(RHS, Imm)) { 4317 // We can codegen setcc op, imm very efficiently compared to a brcond. 4318 // Check for those cases here. 
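// For example, (setcc %a, 0, seteq) below becomes cntlzw + rlwinm(27, 5, 31):
// the leading-zero count is 32 only when %a is zero, and the rlwinm is a
// logical shift right by 5 that maps 32 to 1 and everything smaller to 0.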
4319 // setcc op, 0 4320 if (Imm == 0) { 4321 SDValue Op = LHS; 4322 switch (CC) { 4323 default: break; 4324 case ISD::SETEQ: { 4325 Op = SDValue(CurDAG->getMachineNode(PPC::CNTLZW, dl, MVT::i32, Op), 0); 4326 SDValue Ops[] = { Op, getI32Imm(27, dl), getI32Imm(5, dl), 4327 getI32Imm(31, dl) }; 4328 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4329 return true; 4330 } 4331 case ISD::SETNE: { 4332 if (isPPC64) break; 4333 SDValue AD = 4334 SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 4335 Op, getI32Imm(~0U, dl)), 0); 4336 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, AD, Op, AD.getValue(1)); 4337 return true; 4338 } 4339 case ISD::SETLT: { 4340 SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl), 4341 getI32Imm(31, dl) }; 4342 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4343 return true; 4344 } 4345 case ISD::SETGT: { 4346 SDValue T = 4347 SDValue(CurDAG->getMachineNode(PPC::NEG, dl, MVT::i32, Op), 0); 4348 T = SDValue(CurDAG->getMachineNode(PPC::ANDC, dl, MVT::i32, T, Op), 0); 4349 SDValue Ops[] = { T, getI32Imm(1, dl), getI32Imm(31, dl), 4350 getI32Imm(31, dl) }; 4351 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4352 return true; 4353 } 4354 } 4355 } else if (Imm == ~0U) { // setcc op, -1 4356 SDValue Op = LHS; 4357 switch (CC) { 4358 default: break; 4359 case ISD::SETEQ: 4360 if (isPPC64) break; 4361 Op = SDValue(CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 4362 Op, getI32Imm(1, dl)), 0); 4363 CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, 4364 SDValue(CurDAG->getMachineNode(PPC::LI, dl, 4365 MVT::i32, 4366 getI32Imm(0, dl)), 4367 0), Op.getValue(1)); 4368 return true; 4369 case ISD::SETNE: { 4370 if (isPPC64) break; 4371 Op = SDValue(CurDAG->getMachineNode(PPC::NOR, dl, MVT::i32, Op, Op), 0); 4372 SDNode *AD = CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 4373 Op, getI32Imm(~0U, dl)); 4374 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(AD, 0), Op, 4375 SDValue(AD, 1)); 4376 return true; 4377 } 4378 case ISD::SETLT: { 4379 SDValue AD = SDValue(CurDAG->getMachineNode(PPC::ADDI, dl, MVT::i32, Op, 4380 getI32Imm(1, dl)), 0); 4381 SDValue AN = SDValue(CurDAG->getMachineNode(PPC::AND, dl, MVT::i32, AD, 4382 Op), 0); 4383 SDValue Ops[] = { AN, getI32Imm(1, dl), getI32Imm(31, dl), 4384 getI32Imm(31, dl) }; 4385 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4386 return true; 4387 } 4388 case ISD::SETGT: { 4389 SDValue Ops[] = { Op, getI32Imm(1, dl), getI32Imm(31, dl), 4390 getI32Imm(31, dl) }; 4391 Op = SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); 4392 CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Op, getI32Imm(1, dl)); 4393 return true; 4394 } 4395 } 4396 } 4397 } 4398 4399 // Altivec Vector compare instructions do not set any CR register by default and 4400 // vector compare operations return the same type as the operands. 4401 if (!IsStrict && LHS.getValueType().isVector()) { 4402 if (Subtarget->hasSPE()) 4403 return false; 4404 4405 EVT VecVT = LHS.getValueType(); 4406 bool Swap, Negate; 4407 unsigned int VCmpInst = 4408 getVCmpInst(VecVT.getSimpleVT(), CC, Subtarget->hasVSX(), Swap, Negate); 4409 if (Swap) 4410 std::swap(LHS, RHS); 4411 4412 EVT ResVT = VecVT.changeVectorElementTypeToInteger(); 4413 if (Negate) { 4414 SDValue VCmp(CurDAG->getMachineNode(VCmpInst, dl, ResVT, LHS, RHS), 0); 4415 CurDAG->SelectNodeTo(N, Subtarget->hasVSX() ? 
PPC::XXLNOR : PPC::VNOR, 4416 ResVT, VCmp, VCmp); 4417 return true; 4418 } 4419 4420 CurDAG->SelectNodeTo(N, VCmpInst, ResVT, LHS, RHS); 4421 return true; 4422 } 4423 4424 if (Subtarget->useCRBits()) 4425 return false; 4426 4427 bool Inv; 4428 unsigned Idx = getCRIdxForSetCC(CC, Inv); 4429 SDValue CCReg = SelectCC(LHS, RHS, CC, dl, Chain); 4430 if (IsStrict) 4431 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), CCReg.getValue(1)); 4432 SDValue IntCR; 4433 4434 // SPE e*cmp* instructions only set the 'gt' bit, so hard-code that 4435 // The correct compare instruction is already set by SelectCC() 4436 if (Subtarget->hasSPE() && LHS.getValueType().isFloatingPoint()) { 4437 Idx = 1; 4438 } 4439 4440 // Force the ccreg into CR7. 4441 SDValue CR7Reg = CurDAG->getRegister(PPC::CR7, MVT::i32); 4442 4443 SDValue InFlag(nullptr, 0); // Null incoming flag value. 4444 CCReg = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, CR7Reg, CCReg, 4445 InFlag).getValue(1); 4446 4447 IntCR = SDValue(CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, CR7Reg, 4448 CCReg), 0); 4449 4450 SDValue Ops[] = { IntCR, getI32Imm((32 - (3 - Idx)) & 31, dl), 4451 getI32Imm(31, dl), getI32Imm(31, dl) }; 4452 if (!Inv) { 4453 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4454 return true; 4455 } 4456 4457 // Get the specified bit. 4458 SDValue Tmp = 4459 SDValue(CurDAG->getMachineNode(PPC::RLWINM, dl, MVT::i32, Ops), 0); 4460 CurDAG->SelectNodeTo(N, PPC::XORI, MVT::i32, Tmp, getI32Imm(1, dl)); 4461 return true; 4462 } 4463 4464 /// Does this node represent a load/store node whose address can be represented 4465 /// with a register plus an immediate that's a multiple of \p Val: 4466 bool PPCDAGToDAGISel::isOffsetMultipleOf(SDNode *N, unsigned Val) const { 4467 LoadSDNode *LDN = dyn_cast<LoadSDNode>(N); 4468 StoreSDNode *STN = dyn_cast<StoreSDNode>(N); 4469 SDValue AddrOp; 4470 if (LDN) 4471 AddrOp = LDN->getOperand(1); 4472 else if (STN) 4473 AddrOp = STN->getOperand(2); 4474 4475 // If the address points a frame object or a frame object with an offset, 4476 // we need to check the object alignment. 4477 short Imm = 0; 4478 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>( 4479 AddrOp.getOpcode() == ISD::ADD ? AddrOp.getOperand(0) : 4480 AddrOp)) { 4481 // If op0 is a frame index that is under aligned, we can't do it either, 4482 // because it is translated to r31 or r1 + slot + offset. We won't know the 4483 // slot number until the stack frame is finalized. 4484 const MachineFrameInfo &MFI = CurDAG->getMachineFunction().getFrameInfo(); 4485 unsigned SlotAlign = MFI.getObjectAlign(FI->getIndex()).value(); 4486 if ((SlotAlign % Val) != 0) 4487 return false; 4488 4489 // If we have an offset, we need further check on the offset. 4490 if (AddrOp.getOpcode() != ISD::ADD) 4491 return true; 4492 } 4493 4494 if (AddrOp.getOpcode() == ISD::ADD) 4495 return isIntS16Immediate(AddrOp.getOperand(1), Imm) && !(Imm % Val); 4496 4497 // If the address comes from the outside, the offset will be zero. 4498 return AddrOp.getOpcode() == ISD::CopyFromReg; 4499 } 4500 4501 void PPCDAGToDAGISel::transferMemOperands(SDNode *N, SDNode *Result) { 4502 // Transfer memoperands. 
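// (Keeping the MachineMemOperand on the new node preserves the aliasing,
// volatility and size information that later machine passes rely on.)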
4503 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand(); 4504 CurDAG->setNodeMemRefs(cast<MachineSDNode>(Result), {MemOp}); 4505 } 4506 4507 static bool mayUseP9Setb(SDNode *N, const ISD::CondCode &CC, SelectionDAG *DAG, 4508 bool &NeedSwapOps, bool &IsUnCmp) { 4509 4510 assert(N->getOpcode() == ISD::SELECT_CC && "Expecting a SELECT_CC here."); 4511 4512 SDValue LHS = N->getOperand(0); 4513 SDValue RHS = N->getOperand(1); 4514 SDValue TrueRes = N->getOperand(2); 4515 SDValue FalseRes = N->getOperand(3); 4516 ConstantSDNode *TrueConst = dyn_cast<ConstantSDNode>(TrueRes); 4517 if (!TrueConst || (N->getSimpleValueType(0) != MVT::i64 && 4518 N->getSimpleValueType(0) != MVT::i32)) 4519 return false; 4520 4521 // We are looking for any of: 4522 // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, cc2)), cc1) 4523 // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, cc2)), cc1) 4524 // (select_cc lhs, rhs, 0, (select_cc [lr]hs, [lr]hs, 1, -1, cc2), seteq) 4525 // (select_cc lhs, rhs, 0, (select_cc [lr]hs, [lr]hs, -1, 1, cc2), seteq) 4526 int64_t TrueResVal = TrueConst->getSExtValue(); 4527 if ((TrueResVal < -1 || TrueResVal > 1) || 4528 (TrueResVal == -1 && FalseRes.getOpcode() != ISD::ZERO_EXTEND) || 4529 (TrueResVal == 1 && FalseRes.getOpcode() != ISD::SIGN_EXTEND) || 4530 (TrueResVal == 0 && 4531 (FalseRes.getOpcode() != ISD::SELECT_CC || CC != ISD::SETEQ))) 4532 return false; 4533 4534 SDValue SetOrSelCC = FalseRes.getOpcode() == ISD::SELECT_CC 4535 ? FalseRes 4536 : FalseRes.getOperand(0); 4537 bool InnerIsSel = SetOrSelCC.getOpcode() == ISD::SELECT_CC; 4538 if (SetOrSelCC.getOpcode() != ISD::SETCC && 4539 SetOrSelCC.getOpcode() != ISD::SELECT_CC) 4540 return false; 4541 4542 // Without this setb optimization, the outer SELECT_CC will be manually 4543 // selected to SELECT_CC_I4/SELECT_CC_I8 Pseudo, then expand-isel-pseudos pass 4544 // transforms pseudo instruction to isel instruction. When there are more than 4545 // one use for result like zext/sext, with current optimization we only see 4546 // isel is replaced by setb but can't see any significant gain. Since 4547 // setb has longer latency than original isel, we should avoid this. Another 4548 // point is that setb requires comparison always kept, it can break the 4549 // opportunity to get the comparison away if we have in future. 4550 if (!SetOrSelCC.hasOneUse() || (!InnerIsSel && !FalseRes.hasOneUse())) 4551 return false; 4552 4553 SDValue InnerLHS = SetOrSelCC.getOperand(0); 4554 SDValue InnerRHS = SetOrSelCC.getOperand(1); 4555 ISD::CondCode InnerCC = 4556 cast<CondCodeSDNode>(SetOrSelCC.getOperand(InnerIsSel ? 4 : 2))->get(); 4557 // If the inner comparison is a select_cc, make sure the true/false values are 4558 // 1/-1 and canonicalize it if needed. 4559 if (InnerIsSel) { 4560 ConstantSDNode *SelCCTrueConst = 4561 dyn_cast<ConstantSDNode>(SetOrSelCC.getOperand(2)); 4562 ConstantSDNode *SelCCFalseConst = 4563 dyn_cast<ConstantSDNode>(SetOrSelCC.getOperand(3)); 4564 if (!SelCCTrueConst || !SelCCFalseConst) 4565 return false; 4566 int64_t SelCCTVal = SelCCTrueConst->getSExtValue(); 4567 int64_t SelCCFVal = SelCCFalseConst->getSExtValue(); 4568 // The values must be -1/1 (requiring a swap) or 1/-1. 
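    // A -1/1 pair is treated as the canonical 1/-1 form with the inner
    // compare operands exchanged; whether that exchange lines up with the
    // outer operands is reconciled below via InnerSwapped.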
4569 if (SelCCTVal == -1 && SelCCFVal == 1) { 4570 std::swap(InnerLHS, InnerRHS); 4571 } else if (SelCCTVal != 1 || SelCCFVal != -1) 4572 return false; 4573 } 4574 4575 // Canonicalize unsigned case 4576 if (InnerCC == ISD::SETULT || InnerCC == ISD::SETUGT) { 4577 IsUnCmp = true; 4578 InnerCC = (InnerCC == ISD::SETULT) ? ISD::SETLT : ISD::SETGT; 4579 } 4580 4581 bool InnerSwapped = false; 4582 if (LHS == InnerRHS && RHS == InnerLHS) 4583 InnerSwapped = true; 4584 else if (LHS != InnerLHS || RHS != InnerRHS) 4585 return false; 4586 4587 switch (CC) { 4588 // (select_cc lhs, rhs, 0, \ 4589 // (select_cc [lr]hs, [lr]hs, 1, -1, setlt/setgt), seteq) 4590 case ISD::SETEQ: 4591 if (!InnerIsSel) 4592 return false; 4593 if (InnerCC != ISD::SETLT && InnerCC != ISD::SETGT) 4594 return false; 4595 NeedSwapOps = (InnerCC == ISD::SETGT) ? InnerSwapped : !InnerSwapped; 4596 break; 4597 4598 // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, setne)), setu?lt) 4599 // (select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setgt)), setu?lt) 4600 // (select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setlt)), setu?lt) 4601 // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, setne)), setu?lt) 4602 // (select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setgt)), setu?lt) 4603 // (select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setlt)), setu?lt) 4604 case ISD::SETULT: 4605 if (!IsUnCmp && InnerCC != ISD::SETNE) 4606 return false; 4607 IsUnCmp = true; 4608 LLVM_FALLTHROUGH; 4609 case ISD::SETLT: 4610 if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETGT && !InnerSwapped) || 4611 (InnerCC == ISD::SETLT && InnerSwapped)) 4612 NeedSwapOps = (TrueResVal == 1); 4613 else 4614 return false; 4615 break; 4616 4617 // (select_cc lhs, rhs, 1, (sext (setcc [lr]hs, [lr]hs, setne)), setu?gt) 4618 // (select_cc lhs, rhs, 1, (sext (setcc lhs, rhs, setlt)), setu?gt) 4619 // (select_cc lhs, rhs, 1, (sext (setcc rhs, lhs, setgt)), setu?gt) 4620 // (select_cc lhs, rhs, -1, (zext (setcc [lr]hs, [lr]hs, setne)), setu?gt) 4621 // (select_cc lhs, rhs, -1, (zext (setcc lhs, rhs, setlt)), setu?gt) 4622 // (select_cc lhs, rhs, -1, (zext (setcc rhs, lhs, setgt)), setu?gt) 4623 case ISD::SETUGT: 4624 if (!IsUnCmp && InnerCC != ISD::SETNE) 4625 return false; 4626 IsUnCmp = true; 4627 LLVM_FALLTHROUGH; 4628 case ISD::SETGT: 4629 if (InnerCC == ISD::SETNE || (InnerCC == ISD::SETLT && !InnerSwapped) || 4630 (InnerCC == ISD::SETGT && InnerSwapped)) 4631 NeedSwapOps = (TrueResVal == -1); 4632 else 4633 return false; 4634 break; 4635 4636 default: 4637 return false; 4638 } 4639 4640 LLVM_DEBUG(dbgs() << "Found a node that can be lowered to a SETB: "); 4641 LLVM_DEBUG(N->dump()); 4642 4643 return true; 4644 } 4645 4646 // Return true if it's a software square-root/divide operand. 4647 static bool isSWTestOp(SDValue N) { 4648 if (N.getOpcode() == PPCISD::FTSQRT) 4649 return true; 4650 if (N.getNumOperands() < 1 || !isa<ConstantSDNode>(N.getOperand(0))) 4651 return false; 4652 switch (N.getConstantOperandVal(0)) { 4653 case Intrinsic::ppc_vsx_xvtdivdp: 4654 case Intrinsic::ppc_vsx_xvtdivsp: 4655 case Intrinsic::ppc_vsx_xvtsqrtdp: 4656 case Intrinsic::ppc_vsx_xvtsqrtsp: 4657 return true; 4658 } 4659 return false; 4660 } 4661 4662 bool PPCDAGToDAGISel::tryFoldSWTestBRCC(SDNode *N) { 4663 assert(N->getOpcode() == ISD::BR_CC && "ISD::BR_CC is expected."); 4664 // We are looking for following patterns, where `truncate to i1` actually has 4665 // the same semantic with `and 1`. 
4666 // (br_cc seteq, (truncateToi1 SWTestOp), 0) -> (BCC PRED_NU, SWTestOp) 4667 // (br_cc seteq, (and SWTestOp, 2), 0) -> (BCC PRED_NE, SWTestOp) 4668 // (br_cc seteq, (and SWTestOp, 4), 0) -> (BCC PRED_LE, SWTestOp) 4669 // (br_cc seteq, (and SWTestOp, 8), 0) -> (BCC PRED_GE, SWTestOp) 4670 // (br_cc setne, (truncateToi1 SWTestOp), 0) -> (BCC PRED_UN, SWTestOp) 4671 // (br_cc setne, (and SWTestOp, 2), 0) -> (BCC PRED_EQ, SWTestOp) 4672 // (br_cc setne, (and SWTestOp, 4), 0) -> (BCC PRED_GT, SWTestOp) 4673 // (br_cc setne, (and SWTestOp, 8), 0) -> (BCC PRED_LT, SWTestOp) 4674 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 4675 if (CC != ISD::SETEQ && CC != ISD::SETNE) 4676 return false; 4677 4678 SDValue CmpRHS = N->getOperand(3); 4679 if (!isa<ConstantSDNode>(CmpRHS) || 4680 cast<ConstantSDNode>(CmpRHS)->getSExtValue() != 0) 4681 return false; 4682 4683 SDValue CmpLHS = N->getOperand(2); 4684 if (CmpLHS.getNumOperands() < 1 || !isSWTestOp(CmpLHS.getOperand(0))) 4685 return false; 4686 4687 unsigned PCC = 0; 4688 bool IsCCNE = CC == ISD::SETNE; 4689 if (CmpLHS.getOpcode() == ISD::AND && 4690 isa<ConstantSDNode>(CmpLHS.getOperand(1))) 4691 switch (CmpLHS.getConstantOperandVal(1)) { 4692 case 1: 4693 PCC = IsCCNE ? PPC::PRED_UN : PPC::PRED_NU; 4694 break; 4695 case 2: 4696 PCC = IsCCNE ? PPC::PRED_EQ : PPC::PRED_NE; 4697 break; 4698 case 4: 4699 PCC = IsCCNE ? PPC::PRED_GT : PPC::PRED_LE; 4700 break; 4701 case 8: 4702 PCC = IsCCNE ? PPC::PRED_LT : PPC::PRED_GE; 4703 break; 4704 default: 4705 return false; 4706 } 4707 else if (CmpLHS.getOpcode() == ISD::TRUNCATE && 4708 CmpLHS.getValueType() == MVT::i1) 4709 PCC = IsCCNE ? PPC::PRED_UN : PPC::PRED_NU; 4710 4711 if (PCC) { 4712 SDLoc dl(N); 4713 SDValue Ops[] = {getI32Imm(PCC, dl), CmpLHS.getOperand(0), N->getOperand(4), 4714 N->getOperand(0)}; 4715 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops); 4716 return true; 4717 } 4718 return false; 4719 } 4720 4721 bool PPCDAGToDAGISel::tryAsSingleRLWINM(SDNode *N) { 4722 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4723 unsigned Imm; 4724 if (!isInt32Immediate(N->getOperand(1), Imm)) 4725 return false; 4726 4727 SDLoc dl(N); 4728 SDValue Val = N->getOperand(0); 4729 unsigned SH, MB, ME; 4730 // If this is an and of a value rotated between 0 and 31 bits and then and'd 4731 // with a mask, emit rlwinm 4732 if (isRotateAndMask(Val.getNode(), Imm, false, SH, MB, ME)) { 4733 Val = Val.getOperand(0); 4734 SDValue Ops[] = {Val, getI32Imm(SH, dl), getI32Imm(MB, dl), 4735 getI32Imm(ME, dl)}; 4736 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4737 return true; 4738 } 4739 4740 // If this is just a masked value where the input is not handled, and 4741 // is not a rotate-left (handled by a pattern in the .td file), emit rlwinm 4742 if (isRunOfOnes(Imm, MB, ME) && Val.getOpcode() != ISD::ROTL) { 4743 SDValue Ops[] = {Val, getI32Imm(0, dl), getI32Imm(MB, dl), 4744 getI32Imm(ME, dl)}; 4745 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 4746 return true; 4747 } 4748 4749 // AND X, 0 -> 0, not "rlwinm 32". 
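  // Replacing the node with its constant-zero operand (operand 1) means no
  // instruction is emitted for this AND at all.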
4750 if (Imm == 0) { 4751 ReplaceUses(SDValue(N, 0), N->getOperand(1)); 4752 return true; 4753 } 4754 4755 return false; 4756 } 4757 4758 bool PPCDAGToDAGISel::tryAsSingleRLWINM8(SDNode *N) { 4759 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4760 uint64_t Imm64; 4761 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64)) 4762 return false; 4763 4764 unsigned MB, ME; 4765 if (isRunOfOnes64(Imm64, MB, ME) && MB >= 32 && MB <= ME) { 4766 // MB ME 4767 // +----------------------+ 4768 // |xxxxxxxxxxx00011111000| 4769 // +----------------------+ 4770 // 0 32 64 4771 // We can only do it if the MB is larger than 32 and MB <= ME 4772 // as RLWINM will replace the contents of [0 - 32) with [32 - 64) even 4773 // we didn't rotate it. 4774 SDLoc dl(N); 4775 SDValue Ops[] = {N->getOperand(0), getI64Imm(0, dl), getI64Imm(MB - 32, dl), 4776 getI64Imm(ME - 32, dl)}; 4777 CurDAG->SelectNodeTo(N, PPC::RLWINM8, MVT::i64, Ops); 4778 return true; 4779 } 4780 4781 return false; 4782 } 4783 4784 bool PPCDAGToDAGISel::tryAsPairOfRLDICL(SDNode *N) { 4785 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4786 uint64_t Imm64; 4787 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64)) 4788 return false; 4789 4790 // Do nothing if it is 16-bit imm as the pattern in the .td file handle 4791 // it well with "andi.". 4792 if (isUInt<16>(Imm64)) 4793 return false; 4794 4795 SDLoc Loc(N); 4796 SDValue Val = N->getOperand(0); 4797 4798 // Optimized with two rldicl's as follows: 4799 // Add missing bits on left to the mask and check that the mask is a 4800 // wrapped run of ones, i.e. 4801 // Change pattern |0001111100000011111111| 4802 // to |1111111100000011111111|. 4803 unsigned NumOfLeadingZeros = countLeadingZeros(Imm64); 4804 if (NumOfLeadingZeros != 0) 4805 Imm64 |= maskLeadingOnes<uint64_t>(NumOfLeadingZeros); 4806 4807 unsigned MB, ME; 4808 if (!isRunOfOnes64(Imm64, MB, ME)) 4809 return false; 4810 4811 // ME MB MB-ME+63 4812 // +----------------------+ +----------------------+ 4813 // |1111111100000011111111| -> |0000001111111111111111| 4814 // +----------------------+ +----------------------+ 4815 // 0 63 0 63 4816 // There are ME + 1 ones on the left and (MB - ME + 63) & 63 zeros in between. 4817 unsigned OnesOnLeft = ME + 1; 4818 unsigned ZerosInBetween = (MB - ME + 63) & 63; 4819 // Rotate left by OnesOnLeft (so leading ones are now trailing ones) and clear 4820 // on the left the bits that are already zeros in the mask. 4821 Val = SDValue(CurDAG->getMachineNode(PPC::RLDICL, Loc, MVT::i64, Val, 4822 getI64Imm(OnesOnLeft, Loc), 4823 getI64Imm(ZerosInBetween, Loc)), 4824 0); 4825 // MB-ME+63 ME MB 4826 // +----------------------+ +----------------------+ 4827 // |0000001111111111111111| -> |0001111100000011111111| 4828 // +----------------------+ +----------------------+ 4829 // 0 63 0 63 4830 // Rotate back by 64 - OnesOnLeft to undo previous rotate. Then clear on the 4831 // left the number of ones we previously added. 4832 SDValue Ops[] = {Val, getI64Imm(64 - OnesOnLeft, Loc), 4833 getI64Imm(NumOfLeadingZeros, Loc)}; 4834 CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops); 4835 return true; 4836 } 4837 4838 bool PPCDAGToDAGISel::tryAsSingleRLWIMI(SDNode *N) { 4839 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4840 unsigned Imm; 4841 if (!isInt32Immediate(N->getOperand(1), Imm)) 4842 return false; 4843 4844 SDValue Val = N->getOperand(0); 4845 unsigned Imm2; 4846 // ISD::OR doesn't get all the bitfield insertion fun. 
4847 // (and (or x, c1), c2) where isRunOfOnes(~(c1^c2)) might be a 4848 // bitfield insert. 4849 if (Val.getOpcode() != ISD::OR || !isInt32Immediate(Val.getOperand(1), Imm2)) 4850 return false; 4851 4852 // The idea here is to check whether this is equivalent to: 4853 // (c1 & m) | (x & ~m) 4854 // where m is a run-of-ones mask. The logic here is that, for each bit in 4855 // c1 and c2: 4856 // - if both are 1, then the output will be 1. 4857 // - if both are 0, then the output will be 0. 4858 // - if the bit in c1 is 0, and the bit in c2 is 1, then the output will 4859 // come from x. 4860 // - if the bit in c1 is 1, and the bit in c2 is 0, then the output will 4861 // be 0. 4862 // If that last condition is never the case, then we can form m from the 4863 // bits that are the same between c1 and c2. 4864 unsigned MB, ME; 4865 if (isRunOfOnes(~(Imm ^ Imm2), MB, ME) && !(~Imm & Imm2)) { 4866 SDLoc dl(N); 4867 SDValue Ops[] = {Val.getOperand(0), Val.getOperand(1), getI32Imm(0, dl), 4868 getI32Imm(MB, dl), getI32Imm(ME, dl)}; 4869 ReplaceNode(N, CurDAG->getMachineNode(PPC::RLWIMI, dl, MVT::i32, Ops)); 4870 return true; 4871 } 4872 4873 return false; 4874 } 4875 4876 bool PPCDAGToDAGISel::tryAsSingleRLDICL(SDNode *N) { 4877 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4878 uint64_t Imm64; 4879 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) || !isMask_64(Imm64)) 4880 return false; 4881 4882 // If this is a 64-bit zero-extension mask, emit rldicl. 4883 unsigned MB = 64 - countTrailingOnes(Imm64); 4884 unsigned SH = 0; 4885 unsigned Imm; 4886 SDValue Val = N->getOperand(0); 4887 SDLoc dl(N); 4888 4889 if (Val.getOpcode() == ISD::ANY_EXTEND) { 4890 auto Op0 = Val.getOperand(0); 4891 if (Op0.getOpcode() == ISD::SRL && 4892 isInt32Immediate(Op0.getOperand(1).getNode(), Imm) && Imm <= MB) { 4893 4894 auto ResultType = Val.getNode()->getValueType(0); 4895 auto ImDef = CurDAG->getMachineNode(PPC::IMPLICIT_DEF, dl, ResultType); 4896 SDValue IDVal(ImDef, 0); 4897 4898 Val = SDValue(CurDAG->getMachineNode(PPC::INSERT_SUBREG, dl, ResultType, 4899 IDVal, Op0.getOperand(0), 4900 getI32Imm(1, dl)), 4901 0); 4902 SH = 64 - Imm; 4903 } 4904 } 4905 4906 // If the operand is a logical right shift, we can fold it into this 4907 // instruction: rldicl(rldicl(x, 64-n, n), 0, mb) -> rldicl(x, 64-n, mb) 4908 // for n <= mb. The right shift is really a left rotate followed by a 4909 // mask, and this mask is a more-restrictive sub-mask of the mask implied 4910 // by the shift. 4911 if (Val.getOpcode() == ISD::SRL && 4912 isInt32Immediate(Val.getOperand(1).getNode(), Imm) && Imm <= MB) { 4913 assert(Imm < 64 && "Illegal shift amount"); 4914 Val = Val.getOperand(0); 4915 SH = 64 - Imm; 4916 } 4917 4918 SDValue Ops[] = {Val, getI32Imm(SH, dl), getI32Imm(MB, dl)}; 4919 CurDAG->SelectNodeTo(N, PPC::RLDICL, MVT::i64, Ops); 4920 return true; 4921 } 4922 4923 bool PPCDAGToDAGISel::tryAsSingleRLDICR(SDNode *N) { 4924 assert(N->getOpcode() == ISD::AND && "ISD::AND SDNode expected"); 4925 uint64_t Imm64; 4926 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) || 4927 !isMask_64(~Imm64)) 4928 return false; 4929 4930 // If this is a negated 64-bit zero-extension mask, 4931 // i.e. the immediate is a sequence of ones from most significant side 4932 // and all zero for reminder, we should use rldicr. 
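  // For example, Imm64 = 0xFFFFFF0000000000 (24 leading ones): ~Imm64 has 40
  // trailing ones, so MB = 63 - 40 = 23 and "rldicr x, 0, 23" keeps exactly
  // the top 24 bits of x.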
4933 unsigned MB = 63 - countTrailingOnes(~Imm64); 4934 unsigned SH = 0; 4935 SDLoc dl(N); 4936 SDValue Ops[] = {N->getOperand(0), getI32Imm(SH, dl), getI32Imm(MB, dl)}; 4937 CurDAG->SelectNodeTo(N, PPC::RLDICR, MVT::i64, Ops); 4938 return true; 4939 } 4940 4941 bool PPCDAGToDAGISel::tryAsSingleRLDIMI(SDNode *N) { 4942 assert(N->getOpcode() == ISD::OR && "ISD::OR SDNode expected"); 4943 uint64_t Imm64; 4944 unsigned MB, ME; 4945 SDValue N0 = N->getOperand(0); 4946 4947 // We won't get fewer instructions if the imm is 32-bit integer. 4948 // rldimi requires the imm to have consecutive ones with both sides zero. 4949 // Also, make sure the first Op has only one use, otherwise this may increase 4950 // register pressure since rldimi is destructive. 4951 if (!isInt64Immediate(N->getOperand(1).getNode(), Imm64) || 4952 isUInt<32>(Imm64) || !isRunOfOnes64(Imm64, MB, ME) || !N0.hasOneUse()) 4953 return false; 4954 4955 unsigned SH = 63 - ME; 4956 SDLoc Dl(N); 4957 // Use select64Imm for making LI instr instead of directly putting Imm64 4958 SDValue Ops[] = { 4959 N->getOperand(0), 4960 SDValue(selectI64Imm(CurDAG, getI64Imm(-1, Dl).getNode()), 0), 4961 getI32Imm(SH, Dl), getI32Imm(MB, Dl)}; 4962 CurDAG->SelectNodeTo(N, PPC::RLDIMI, MVT::i64, Ops); 4963 return true; 4964 } 4965 4966 // Select - Convert the specified operand from a target-independent to a 4967 // target-specific node if it hasn't already been changed. 4968 void PPCDAGToDAGISel::Select(SDNode *N) { 4969 SDLoc dl(N); 4970 if (N->isMachineOpcode()) { 4971 N->setNodeId(-1); 4972 return; // Already selected. 4973 } 4974 4975 // In case any misguided DAG-level optimizations form an ADD with a 4976 // TargetConstant operand, crash here instead of miscompiling (by selecting 4977 // an r+r add instead of some kind of r+i add). 4978 if (N->getOpcode() == ISD::ADD && 4979 N->getOperand(1).getOpcode() == ISD::TargetConstant) 4980 llvm_unreachable("Invalid ADD with TargetConstant operand"); 4981 4982 // Try matching complex bit permutations before doing anything else. 4983 if (tryBitPermutation(N)) 4984 return; 4985 4986 // Try to emit integer compares as GPR-only sequences (i.e. no use of CR). 4987 if (tryIntCompareInGPR(N)) 4988 return; 4989 4990 switch (N->getOpcode()) { 4991 default: break; 4992 4993 case ISD::Constant: 4994 if (N->getValueType(0) == MVT::i64) { 4995 ReplaceNode(N, selectI64Imm(CurDAG, N)); 4996 return; 4997 } 4998 break; 4999 5000 case ISD::INTRINSIC_WO_CHAIN: { 5001 // We emit the PPC::FSELS instruction here because of type conflicts with 5002 // the comparison operand. The FSELS instruction is defined to use an 8-byte 5003 // comparison like the FSELD version. The fsels intrinsic takes a 4-byte 5004 // value for the comparison. When selecting through a .td file, a type 5005 // error is raised. Must check this first so we never break on the 5006 // !Subtarget->isISA3_1() check. 
5007 if (N->getConstantOperandVal(0) == Intrinsic::ppc_fsels) { 5008 SDValue Ops[] = {N->getOperand(1), N->getOperand(2), N->getOperand(3)}; 5009 CurDAG->SelectNodeTo(N, PPC::FSELS, MVT::f32, Ops); 5010 return; 5011 } 5012 5013 if (!Subtarget->isISA3_1()) 5014 break; 5015 unsigned Opcode = 0; 5016 switch (N->getConstantOperandVal(0)) { 5017 default: 5018 break; 5019 case Intrinsic::ppc_altivec_vstribr_p: 5020 Opcode = PPC::VSTRIBR_rec; 5021 break; 5022 case Intrinsic::ppc_altivec_vstribl_p: 5023 Opcode = PPC::VSTRIBL_rec; 5024 break; 5025 case Intrinsic::ppc_altivec_vstrihr_p: 5026 Opcode = PPC::VSTRIHR_rec; 5027 break; 5028 case Intrinsic::ppc_altivec_vstrihl_p: 5029 Opcode = PPC::VSTRIHL_rec; 5030 break; 5031 } 5032 if (!Opcode) 5033 break; 5034 5035 // Generate the appropriate vector string isolate intrinsic to match. 5036 EVT VTs[] = {MVT::v16i8, MVT::Glue}; 5037 SDValue VecStrOp = 5038 SDValue(CurDAG->getMachineNode(Opcode, dl, VTs, N->getOperand(2)), 0); 5039 // Vector string isolate instructions update the EQ bit of CR6. 5040 // Generate a SETBC instruction to extract the bit and place it in a GPR. 5041 SDValue SubRegIdx = CurDAG->getTargetConstant(PPC::sub_eq, dl, MVT::i32); 5042 SDValue CR6Reg = CurDAG->getRegister(PPC::CR6, MVT::i32); 5043 SDValue CRBit = SDValue( 5044 CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, MVT::i1, 5045 CR6Reg, SubRegIdx, VecStrOp.getValue(1)), 5046 0); 5047 CurDAG->SelectNodeTo(N, PPC::SETBC, MVT::i32, CRBit); 5048 return; 5049 } 5050 5051 case ISD::SETCC: 5052 case ISD::STRICT_FSETCC: 5053 case ISD::STRICT_FSETCCS: 5054 if (trySETCC(N)) 5055 return; 5056 break; 5057 // These nodes will be transformed into GETtlsADDR32 node, which 5058 // later becomes BL_TLS __tls_get_addr(sym at tlsgd)@PLT 5059 case PPCISD::ADDI_TLSLD_L_ADDR: 5060 case PPCISD::ADDI_TLSGD_L_ADDR: { 5061 const Module *Mod = MF->getFunction().getParent(); 5062 if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) != MVT::i32 || 5063 !Subtarget->isSecurePlt() || !Subtarget->isTargetELF() || 5064 Mod->getPICLevel() == PICLevel::SmallPIC) 5065 break; 5066 // Attach global base pointer on GETtlsADDR32 node in order to 5067 // generate secure plt code for TLS symbols. 
5068 getGlobalBaseReg(); 5069 } break; 5070 case PPCISD::CALL: { 5071 if (PPCLowering->getPointerTy(CurDAG->getDataLayout()) != MVT::i32 || 5072 !TM.isPositionIndependent() || !Subtarget->isSecurePlt() || 5073 !Subtarget->isTargetELF()) 5074 break; 5075 5076 SDValue Op = N->getOperand(1); 5077 5078 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) { 5079 if (GA->getTargetFlags() == PPCII::MO_PLT) 5080 getGlobalBaseReg(); 5081 } 5082 else if (ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op)) { 5083 if (ES->getTargetFlags() == PPCII::MO_PLT) 5084 getGlobalBaseReg(); 5085 } 5086 } 5087 break; 5088 5089 case PPCISD::GlobalBaseReg: 5090 ReplaceNode(N, getGlobalBaseReg()); 5091 return; 5092 5093 case ISD::FrameIndex: 5094 selectFrameIndex(N, N); 5095 return; 5096 5097 case PPCISD::MFOCRF: { 5098 SDValue InFlag = N->getOperand(1); 5099 ReplaceNode(N, CurDAG->getMachineNode(PPC::MFOCRF, dl, MVT::i32, 5100 N->getOperand(0), InFlag)); 5101 return; 5102 } 5103 5104 case PPCISD::READ_TIME_BASE: 5105 ReplaceNode(N, CurDAG->getMachineNode(PPC::ReadTB, dl, MVT::i32, MVT::i32, 5106 MVT::Other, N->getOperand(0))); 5107 return; 5108 5109 case PPCISD::SRA_ADDZE: { 5110 SDValue N0 = N->getOperand(0); 5111 SDValue ShiftAmt = 5112 CurDAG->getTargetConstant(*cast<ConstantSDNode>(N->getOperand(1))-> 5113 getConstantIntValue(), dl, 5114 N->getValueType(0)); 5115 if (N->getValueType(0) == MVT::i64) { 5116 SDNode *Op = 5117 CurDAG->getMachineNode(PPC::SRADI, dl, MVT::i64, MVT::Glue, 5118 N0, ShiftAmt); 5119 CurDAG->SelectNodeTo(N, PPC::ADDZE8, MVT::i64, SDValue(Op, 0), 5120 SDValue(Op, 1)); 5121 return; 5122 } else { 5123 assert(N->getValueType(0) == MVT::i32 && 5124 "Expecting i64 or i32 in PPCISD::SRA_ADDZE"); 5125 SDNode *Op = 5126 CurDAG->getMachineNode(PPC::SRAWI, dl, MVT::i32, MVT::Glue, 5127 N0, ShiftAmt); 5128 CurDAG->SelectNodeTo(N, PPC::ADDZE, MVT::i32, SDValue(Op, 0), 5129 SDValue(Op, 1)); 5130 return; 5131 } 5132 } 5133 5134 case ISD::STORE: { 5135 // Change TLS initial-exec D-form stores to X-form stores. 5136 StoreSDNode *ST = cast<StoreSDNode>(N); 5137 if (EnableTLSOpt && Subtarget->isELFv2ABI() && 5138 ST->getAddressingMode() != ISD::PRE_INC) 5139 if (tryTLSXFormStore(ST)) 5140 return; 5141 break; 5142 } 5143 case ISD::LOAD: { 5144 // Handle preincrement loads. 5145 LoadSDNode *LD = cast<LoadSDNode>(N); 5146 EVT LoadedVT = LD->getMemoryVT(); 5147 5148 // Normal loads are handled by code generated from the .td file. 5149 if (LD->getAddressingMode() != ISD::PRE_INC) { 5150 // Change TLS initial-exec D-form loads to X-form loads. 5151 if (EnableTLSOpt && Subtarget->isELFv2ABI()) 5152 if (tryTLSXFormLoad(LD)) 5153 return; 5154 break; 5155 } 5156 5157 SDValue Offset = LD->getOffset(); 5158 if (Offset.getOpcode() == ISD::TargetConstant || 5159 Offset.getOpcode() == ISD::TargetGlobalAddress) { 5160 5161 unsigned Opcode; 5162 bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD; 5163 if (LD->getValueType(0) != MVT::i64) { 5164 // Handle PPC32 integer and normal FP loads. 5165 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); 5166 switch (LoadedVT.getSimpleVT().SimpleTy) { 5167 default: llvm_unreachable("Invalid PPC load type!"); 5168 case MVT::f64: Opcode = PPC::LFDU; break; 5169 case MVT::f32: Opcode = PPC::LFSU; break; 5170 case MVT::i32: Opcode = PPC::LWZU; break; 5171 case MVT::i16: Opcode = isSExt ? 
PPC::LHAU : PPC::LHZU; break; 5172 case MVT::i1: 5173 case MVT::i8: Opcode = PPC::LBZU; break; 5174 } 5175 } else { 5176 assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!"); 5177 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); 5178 switch (LoadedVT.getSimpleVT().SimpleTy) { 5179 default: llvm_unreachable("Invalid PPC load type!"); 5180 case MVT::i64: Opcode = PPC::LDU; break; 5181 case MVT::i32: Opcode = PPC::LWZU8; break; 5182 case MVT::i16: Opcode = isSExt ? PPC::LHAU8 : PPC::LHZU8; break; 5183 case MVT::i1: 5184 case MVT::i8: Opcode = PPC::LBZU8; break; 5185 } 5186 } 5187 5188 SDValue Chain = LD->getChain(); 5189 SDValue Base = LD->getBasePtr(); 5190 SDValue Ops[] = { Offset, Base, Chain }; 5191 SDNode *MN = CurDAG->getMachineNode( 5192 Opcode, dl, LD->getValueType(0), 5193 PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops); 5194 transferMemOperands(N, MN); 5195 ReplaceNode(N, MN); 5196 return; 5197 } else { 5198 unsigned Opcode; 5199 bool isSExt = LD->getExtensionType() == ISD::SEXTLOAD; 5200 if (LD->getValueType(0) != MVT::i64) { 5201 // Handle PPC32 integer and normal FP loads. 5202 assert((!isSExt || LoadedVT == MVT::i16) && "Invalid sext update load"); 5203 switch (LoadedVT.getSimpleVT().SimpleTy) { 5204 default: llvm_unreachable("Invalid PPC load type!"); 5205 case MVT::f64: Opcode = PPC::LFDUX; break; 5206 case MVT::f32: Opcode = PPC::LFSUX; break; 5207 case MVT::i32: Opcode = PPC::LWZUX; break; 5208 case MVT::i16: Opcode = isSExt ? PPC::LHAUX : PPC::LHZUX; break; 5209 case MVT::i1: 5210 case MVT::i8: Opcode = PPC::LBZUX; break; 5211 } 5212 } else { 5213 assert(LD->getValueType(0) == MVT::i64 && "Unknown load result type!"); 5214 assert((!isSExt || LoadedVT == MVT::i16 || LoadedVT == MVT::i32) && 5215 "Invalid sext update load"); 5216 switch (LoadedVT.getSimpleVT().SimpleTy) { 5217 default: llvm_unreachable("Invalid PPC load type!"); 5218 case MVT::i64: Opcode = PPC::LDUX; break; 5219 case MVT::i32: Opcode = isSExt ? PPC::LWAUX : PPC::LWZUX8; break; 5220 case MVT::i16: Opcode = isSExt ? PPC::LHAUX8 : PPC::LHZUX8; break; 5221 case MVT::i1: 5222 case MVT::i8: Opcode = PPC::LBZUX8; break; 5223 } 5224 } 5225 5226 SDValue Chain = LD->getChain(); 5227 SDValue Base = LD->getBasePtr(); 5228 SDValue Ops[] = { Base, Offset, Chain }; 5229 SDNode *MN = CurDAG->getMachineNode( 5230 Opcode, dl, LD->getValueType(0), 5231 PPCLowering->getPointerTy(CurDAG->getDataLayout()), MVT::Other, Ops); 5232 transferMemOperands(N, MN); 5233 ReplaceNode(N, MN); 5234 return; 5235 } 5236 } 5237 5238 case ISD::AND: 5239 // If this is an 'and' with a mask, try to emit rlwinm/rldicl/rldicr 5240 if (tryAsSingleRLWINM(N) || tryAsSingleRLWIMI(N) || tryAsSingleRLDICL(N) || 5241 tryAsSingleRLDICR(N) || tryAsSingleRLWINM8(N) || tryAsPairOfRLDICL(N)) 5242 return; 5243 5244 // Other cases are autogenerated. 5245 break; 5246 case ISD::OR: { 5247 if (N->getValueType(0) == MVT::i32) 5248 if (tryBitfieldInsert(N)) 5249 return; 5250 5251 int16_t Imm; 5252 if (N->getOperand(0)->getOpcode() == ISD::FrameIndex && 5253 isIntS16Immediate(N->getOperand(1), Imm)) { 5254 KnownBits LHSKnown = CurDAG->computeKnownBits(N->getOperand(0)); 5255 5256 // If this is equivalent to an add, then we can fold it with the 5257 // FrameIndex calculation. 
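      // The OR acts as an ADD when no carries can occur, i.e. every bit that
      // may be set in Imm is known to be zero in the frame-index operand;
      // the check below verifies exactly that.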
5258 if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)Imm) == ~0ULL) { 5259 selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm); 5260 return; 5261 } 5262 } 5263 5264 // If this is 'or' against an imm with consecutive ones and both sides zero, 5265 // try to emit rldimi 5266 if (tryAsSingleRLDIMI(N)) 5267 return; 5268 5269 // OR with a 32-bit immediate can be handled by ori + oris 5270 // without creating an immediate in a GPR. 5271 uint64_t Imm64 = 0; 5272 bool IsPPC64 = Subtarget->isPPC64(); 5273 if (IsPPC64 && isInt64Immediate(N->getOperand(1), Imm64) && 5274 (Imm64 & ~0xFFFFFFFFuLL) == 0) { 5275 // If ImmHi (ImmHi) is zero, only one ori (oris) is generated later. 5276 uint64_t ImmHi = Imm64 >> 16; 5277 uint64_t ImmLo = Imm64 & 0xFFFF; 5278 if (ImmHi != 0 && ImmLo != 0) { 5279 SDNode *Lo = CurDAG->getMachineNode(PPC::ORI8, dl, MVT::i64, 5280 N->getOperand(0), 5281 getI16Imm(ImmLo, dl)); 5282 SDValue Ops1[] = { SDValue(Lo, 0), getI16Imm(ImmHi, dl)}; 5283 CurDAG->SelectNodeTo(N, PPC::ORIS8, MVT::i64, Ops1); 5284 return; 5285 } 5286 } 5287 5288 // Other cases are autogenerated. 5289 break; 5290 } 5291 case ISD::XOR: { 5292 // XOR with a 32-bit immediate can be handled by xori + xoris 5293 // without creating an immediate in a GPR. 5294 uint64_t Imm64 = 0; 5295 bool IsPPC64 = Subtarget->isPPC64(); 5296 if (IsPPC64 && isInt64Immediate(N->getOperand(1), Imm64) && 5297 (Imm64 & ~0xFFFFFFFFuLL) == 0) { 5298 // If ImmHi (ImmHi) is zero, only one xori (xoris) is generated later. 5299 uint64_t ImmHi = Imm64 >> 16; 5300 uint64_t ImmLo = Imm64 & 0xFFFF; 5301 if (ImmHi != 0 && ImmLo != 0) { 5302 SDNode *Lo = CurDAG->getMachineNode(PPC::XORI8, dl, MVT::i64, 5303 N->getOperand(0), 5304 getI16Imm(ImmLo, dl)); 5305 SDValue Ops1[] = { SDValue(Lo, 0), getI16Imm(ImmHi, dl)}; 5306 CurDAG->SelectNodeTo(N, PPC::XORIS8, MVT::i64, Ops1); 5307 return; 5308 } 5309 } 5310 5311 break; 5312 } 5313 case ISD::ADD: { 5314 int16_t Imm; 5315 if (N->getOperand(0)->getOpcode() == ISD::FrameIndex && 5316 isIntS16Immediate(N->getOperand(1), Imm)) { 5317 selectFrameIndex(N, N->getOperand(0).getNode(), (int)Imm); 5318 return; 5319 } 5320 5321 break; 5322 } 5323 case ISD::SHL: { 5324 unsigned Imm, SH, MB, ME; 5325 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) && 5326 isRotateAndMask(N, Imm, true, SH, MB, ME)) { 5327 SDValue Ops[] = { N->getOperand(0).getOperand(0), 5328 getI32Imm(SH, dl), getI32Imm(MB, dl), 5329 getI32Imm(ME, dl) }; 5330 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 5331 return; 5332 } 5333 5334 // Other cases are autogenerated. 5335 break; 5336 } 5337 case ISD::SRL: { 5338 unsigned Imm, SH, MB, ME; 5339 if (isOpcWithIntImmediate(N->getOperand(0).getNode(), ISD::AND, Imm) && 5340 isRotateAndMask(N, Imm, true, SH, MB, ME)) { 5341 SDValue Ops[] = { N->getOperand(0).getOperand(0), 5342 getI32Imm(SH, dl), getI32Imm(MB, dl), 5343 getI32Imm(ME, dl) }; 5344 CurDAG->SelectNodeTo(N, PPC::RLWINM, MVT::i32, Ops); 5345 return; 5346 } 5347 5348 // Other cases are autogenerated. 5349 break; 5350 } 5351 case ISD::MUL: { 5352 SDValue Op1 = N->getOperand(1); 5353 if (Op1.getOpcode() != ISD::Constant || Op1.getValueType() != MVT::i64) 5354 break; 5355 5356 // If the multiplier fits int16, we can handle it with mulli. 
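    // Otherwise, if the constant is a small value shifted left (checked
    // below), e.g. 0x50000 = 5 << 16, select (rldicr (mulli X, 5), 16, 47)
    // rather than materializing the full constant in a register first.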
5357 int64_t Imm = cast<ConstantSDNode>(Op1)->getZExtValue(); 5358 unsigned Shift = countTrailingZeros<uint64_t>(Imm); 5359 if (isInt<16>(Imm) || !Shift) 5360 break; 5361 5362 // If the shifted value fits int16, we can do this transformation: 5363 // (mul X, c1 << c2) -> (rldicr (mulli X, c1) c2). We do this in ISEL due to 5364 // DAGCombiner prefers (shl (mul X, c1), c2) -> (mul X, c1 << c2). 5365 uint64_t ImmSh = Imm >> Shift; 5366 if (isInt<16>(ImmSh)) { 5367 uint64_t SextImm = SignExtend64(ImmSh & 0xFFFF, 16); 5368 SDValue SDImm = CurDAG->getTargetConstant(SextImm, dl, MVT::i64); 5369 SDNode *MulNode = CurDAG->getMachineNode(PPC::MULLI8, dl, MVT::i64, 5370 N->getOperand(0), SDImm); 5371 CurDAG->SelectNodeTo(N, PPC::RLDICR, MVT::i64, SDValue(MulNode, 0), 5372 getI32Imm(Shift, dl), getI32Imm(63 - Shift, dl)); 5373 return; 5374 } 5375 break; 5376 } 5377 // FIXME: Remove this once the ANDI glue bug is fixed: 5378 case PPCISD::ANDI_rec_1_EQ_BIT: 5379 case PPCISD::ANDI_rec_1_GT_BIT: { 5380 if (!ANDIGlueBug) 5381 break; 5382 5383 EVT InVT = N->getOperand(0).getValueType(); 5384 assert((InVT == MVT::i64 || InVT == MVT::i32) && 5385 "Invalid input type for ANDI_rec_1_EQ_BIT"); 5386 5387 unsigned Opcode = (InVT == MVT::i64) ? PPC::ANDI8_rec : PPC::ANDI_rec; 5388 SDValue AndI(CurDAG->getMachineNode(Opcode, dl, InVT, MVT::Glue, 5389 N->getOperand(0), 5390 CurDAG->getTargetConstant(1, dl, InVT)), 5391 0); 5392 SDValue CR0Reg = CurDAG->getRegister(PPC::CR0, MVT::i32); 5393 SDValue SRIdxVal = CurDAG->getTargetConstant( 5394 N->getOpcode() == PPCISD::ANDI_rec_1_EQ_BIT ? PPC::sub_eq : PPC::sub_gt, 5395 dl, MVT::i32); 5396 5397 CurDAG->SelectNodeTo(N, TargetOpcode::EXTRACT_SUBREG, MVT::i1, CR0Reg, 5398 SRIdxVal, SDValue(AndI.getNode(), 1) /* glue */); 5399 return; 5400 } 5401 case ISD::SELECT_CC: { 5402 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(4))->get(); 5403 EVT PtrVT = 5404 CurDAG->getTargetLoweringInfo().getPointerTy(CurDAG->getDataLayout()); 5405 bool isPPC64 = (PtrVT == MVT::i64); 5406 5407 // If this is a select of i1 operands, we'll pattern match it. 5408 if (Subtarget->useCRBits() && N->getOperand(0).getValueType() == MVT::i1) 5409 break; 5410 5411 if (Subtarget->isISA3_0() && Subtarget->isPPC64()) { 5412 bool NeedSwapOps = false; 5413 bool IsUnCmp = false; 5414 if (mayUseP9Setb(N, CC, CurDAG, NeedSwapOps, IsUnCmp)) { 5415 SDValue LHS = N->getOperand(0); 5416 SDValue RHS = N->getOperand(1); 5417 if (NeedSwapOps) 5418 std::swap(LHS, RHS); 5419 5420 // Make use of SelectCC to generate the comparison to set CR bits, for 5421 // equality comparisons having one literal operand, SelectCC probably 5422 // doesn't need to materialize the whole literal and just use xoris to 5423 // check it first, it leads the following comparison result can't 5424 // exactly represent GT/LT relationship. So to avoid this we specify 5425 // SETGT/SETUGT here instead of SETEQ. 5426 SDValue GenCC = 5427 SelectCC(LHS, RHS, IsUnCmp ? ISD::SETUGT : ISD::SETGT, dl); 5428 CurDAG->SelectNodeTo( 5429 N, N->getSimpleValueType(0) == MVT::i64 ? PPC::SETB8 : PPC::SETB, 5430 N->getValueType(0), GenCC); 5431 NumP9Setb++; 5432 return; 5433 } 5434 } 5435 5436 // Handle the setcc cases here. 
select_cc lhs, 0, 1, 0, cc 5437 if (!isPPC64) 5438 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1))) 5439 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N->getOperand(2))) 5440 if (ConstantSDNode *N3C = dyn_cast<ConstantSDNode>(N->getOperand(3))) 5441 if (N1C->isZero() && N3C->isZero() && N2C->getZExtValue() == 1ULL && 5442 CC == ISD::SETNE && 5443 // FIXME: Implement this optzn for PPC64. 5444 N->getValueType(0) == MVT::i32) { 5445 SDNode *Tmp = 5446 CurDAG->getMachineNode(PPC::ADDIC, dl, MVT::i32, MVT::Glue, 5447 N->getOperand(0), getI32Imm(~0U, dl)); 5448 CurDAG->SelectNodeTo(N, PPC::SUBFE, MVT::i32, SDValue(Tmp, 0), 5449 N->getOperand(0), SDValue(Tmp, 1)); 5450 return; 5451 } 5452 5453 SDValue CCReg = SelectCC(N->getOperand(0), N->getOperand(1), CC, dl); 5454 5455 if (N->getValueType(0) == MVT::i1) { 5456 // An i1 select is: (c & t) | (!c & f). 5457 bool Inv; 5458 unsigned Idx = getCRIdxForSetCC(CC, Inv); 5459 5460 unsigned SRI; 5461 switch (Idx) { 5462 default: llvm_unreachable("Invalid CC index"); 5463 case 0: SRI = PPC::sub_lt; break; 5464 case 1: SRI = PPC::sub_gt; break; 5465 case 2: SRI = PPC::sub_eq; break; 5466 case 3: SRI = PPC::sub_un; break; 5467 } 5468 5469 SDValue CCBit = CurDAG->getTargetExtractSubreg(SRI, dl, MVT::i1, CCReg); 5470 5471 SDValue NotCCBit(CurDAG->getMachineNode(PPC::CRNOR, dl, MVT::i1, 5472 CCBit, CCBit), 0); 5473 SDValue C = Inv ? NotCCBit : CCBit, 5474 NotC = Inv ? CCBit : NotCCBit; 5475 5476 SDValue CAndT(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1, 5477 C, N->getOperand(2)), 0); 5478 SDValue NotCAndF(CurDAG->getMachineNode(PPC::CRAND, dl, MVT::i1, 5479 NotC, N->getOperand(3)), 0); 5480 5481 CurDAG->SelectNodeTo(N, PPC::CROR, MVT::i1, CAndT, NotCAndF); 5482 return; 5483 } 5484 5485 unsigned BROpc = 5486 getPredicateForSetCC(CC, N->getOperand(0).getValueType(), Subtarget); 5487 5488 unsigned SelectCCOp; 5489 if (N->getValueType(0) == MVT::i32) 5490 SelectCCOp = PPC::SELECT_CC_I4; 5491 else if (N->getValueType(0) == MVT::i64) 5492 SelectCCOp = PPC::SELECT_CC_I8; 5493 else if (N->getValueType(0) == MVT::f32) { 5494 if (Subtarget->hasP8Vector()) 5495 SelectCCOp = PPC::SELECT_CC_VSSRC; 5496 else if (Subtarget->hasSPE()) 5497 SelectCCOp = PPC::SELECT_CC_SPE4; 5498 else 5499 SelectCCOp = PPC::SELECT_CC_F4; 5500 } else if (N->getValueType(0) == MVT::f64) { 5501 if (Subtarget->hasVSX()) 5502 SelectCCOp = PPC::SELECT_CC_VSFRC; 5503 else if (Subtarget->hasSPE()) 5504 SelectCCOp = PPC::SELECT_CC_SPE; 5505 else 5506 SelectCCOp = PPC::SELECT_CC_F8; 5507 } else if (N->getValueType(0) == MVT::f128) 5508 SelectCCOp = PPC::SELECT_CC_F16; 5509 else if (Subtarget->hasSPE()) 5510 SelectCCOp = PPC::SELECT_CC_SPE; 5511 else if (N->getValueType(0) == MVT::v2f64 || 5512 N->getValueType(0) == MVT::v2i64) 5513 SelectCCOp = PPC::SELECT_CC_VSRC; 5514 else 5515 SelectCCOp = PPC::SELECT_CC_VRRC; 5516 5517 SDValue Ops[] = { CCReg, N->getOperand(2), N->getOperand(3), 5518 getI32Imm(BROpc, dl) }; 5519 CurDAG->SelectNodeTo(N, SelectCCOp, N->getValueType(0), Ops); 5520 return; 5521 } 5522 case ISD::VECTOR_SHUFFLE: 5523 if (Subtarget->hasVSX() && (N->getValueType(0) == MVT::v2f64 || 5524 N->getValueType(0) == MVT::v2i64)) { 5525 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); 5526 5527 SDValue Op1 = N->getOperand(SVN->getMaskElt(0) < 2 ? 0 : 1), 5528 Op2 = N->getOperand(SVN->getMaskElt(1) < 2 ? 
0 : 1); 5529 unsigned DM[2]; 5530 5531 for (int i = 0; i < 2; ++i) 5532 if (SVN->getMaskElt(i) <= 0 || SVN->getMaskElt(i) == 2) 5533 DM[i] = 0; 5534 else 5535 DM[i] = 1; 5536 5537 if (Op1 == Op2 && DM[0] == 0 && DM[1] == 0 && 5538 Op1.getOpcode() == ISD::SCALAR_TO_VECTOR && 5539 isa<LoadSDNode>(Op1.getOperand(0))) { 5540 LoadSDNode *LD = cast<LoadSDNode>(Op1.getOperand(0)); 5541 SDValue Base, Offset; 5542 5543 if (LD->isUnindexed() && LD->hasOneUse() && Op1.hasOneUse() && 5544 (LD->getMemoryVT() == MVT::f64 || 5545 LD->getMemoryVT() == MVT::i64) && 5546 SelectAddrIdxOnly(LD->getBasePtr(), Base, Offset)) { 5547 SDValue Chain = LD->getChain(); 5548 SDValue Ops[] = { Base, Offset, Chain }; 5549 MachineMemOperand *MemOp = LD->getMemOperand(); 5550 SDNode *NewN = CurDAG->SelectNodeTo(N, PPC::LXVDSX, 5551 N->getValueType(0), Ops); 5552 CurDAG->setNodeMemRefs(cast<MachineSDNode>(NewN), {MemOp}); 5553 return; 5554 } 5555 } 5556 5557 // For little endian, we must swap the input operands and adjust 5558 // the mask elements (reverse and invert them). 5559 if (Subtarget->isLittleEndian()) { 5560 std::swap(Op1, Op2); 5561 unsigned tmp = DM[0]; 5562 DM[0] = 1 - DM[1]; 5563 DM[1] = 1 - tmp; 5564 } 5565 5566 SDValue DMV = CurDAG->getTargetConstant(DM[1] | (DM[0] << 1), dl, 5567 MVT::i32); 5568 SDValue Ops[] = { Op1, Op2, DMV }; 5569 CurDAG->SelectNodeTo(N, PPC::XXPERMDI, N->getValueType(0), Ops); 5570 return; 5571 } 5572 5573 break; 5574 case PPCISD::BDNZ: 5575 case PPCISD::BDZ: { 5576 bool IsPPC64 = Subtarget->isPPC64(); 5577 SDValue Ops[] = { N->getOperand(1), N->getOperand(0) }; 5578 CurDAG->SelectNodeTo(N, N->getOpcode() == PPCISD::BDNZ 5579 ? (IsPPC64 ? PPC::BDNZ8 : PPC::BDNZ) 5580 : (IsPPC64 ? PPC::BDZ8 : PPC::BDZ), 5581 MVT::Other, Ops); 5582 return; 5583 } 5584 case PPCISD::COND_BRANCH: { 5585 // Op #0 is the Chain. 5586 // Op #1 is the PPC::PRED_* number. 5587 // Op #2 is the CR# 5588 // Op #3 is the Dest MBB 5589 // Op #4 is the Flag. 5590 // Prevent PPC::PRED_* from being selected into LI. 5591 unsigned PCC = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 5592 if (EnableBranchHint) 5593 PCC |= getBranchHint(PCC, *FuncInfo, N->getOperand(3)); 5594 5595 SDValue Pred = getI32Imm(PCC, dl); 5596 SDValue Ops[] = { Pred, N->getOperand(2), N->getOperand(3), 5597 N->getOperand(0), N->getOperand(4) }; 5598 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops); 5599 return; 5600 } 5601 case ISD::BR_CC: { 5602 if (tryFoldSWTestBRCC(N)) 5603 return; 5604 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get(); 5605 unsigned PCC = 5606 getPredicateForSetCC(CC, N->getOperand(2).getValueType(), Subtarget); 5607 5608 if (N->getOperand(2).getValueType() == MVT::i1) { 5609 unsigned Opc; 5610 bool Swap; 5611 switch (PCC) { 5612 default: llvm_unreachable("Unexpected Boolean-operand predicate"); 5613 case PPC::PRED_LT: Opc = PPC::CRANDC; Swap = true; break; 5614 case PPC::PRED_LE: Opc = PPC::CRORC; Swap = true; break; 5615 case PPC::PRED_EQ: Opc = PPC::CREQV; Swap = false; break; 5616 case PPC::PRED_GE: Opc = PPC::CRORC; Swap = false; break; 5617 case PPC::PRED_GT: Opc = PPC::CRANDC; Swap = false; break; 5618 case PPC::PRED_NE: Opc = PPC::CRXOR; Swap = false; break; 5619 } 5620 5621 // A signed comparison of i1 values produces the opposite result to an 5622 // unsigned one if the condition code includes less-than or greater-than. 5623 // This is because 1 is the most negative signed i1 number and the most 5624 // positive unsigned i1 number. 
The CR-logical operations used for such 5625 // comparisons are non-commutative so for signed comparisons vs. unsigned 5626 // ones, the input operands just need to be swapped. 5627 if (ISD::isSignedIntSetCC(CC)) 5628 Swap = !Swap; 5629 5630 SDValue BitComp(CurDAG->getMachineNode(Opc, dl, MVT::i1, 5631 N->getOperand(Swap ? 3 : 2), 5632 N->getOperand(Swap ? 2 : 3)), 0); 5633 CurDAG->SelectNodeTo(N, PPC::BC, MVT::Other, BitComp, N->getOperand(4), 5634 N->getOperand(0)); 5635 return; 5636 } 5637 5638 if (EnableBranchHint) 5639 PCC |= getBranchHint(PCC, *FuncInfo, N->getOperand(4)); 5640 5641 SDValue CondCode = SelectCC(N->getOperand(2), N->getOperand(3), CC, dl); 5642 SDValue Ops[] = { getI32Imm(PCC, dl), CondCode, 5643 N->getOperand(4), N->getOperand(0) }; 5644 CurDAG->SelectNodeTo(N, PPC::BCC, MVT::Other, Ops); 5645 return; 5646 } 5647 case ISD::BRIND: { 5648 // FIXME: Should custom lower this. 5649 SDValue Chain = N->getOperand(0); 5650 SDValue Target = N->getOperand(1); 5651 unsigned Opc = Target.getValueType() == MVT::i32 ? PPC::MTCTR : PPC::MTCTR8; 5652 unsigned Reg = Target.getValueType() == MVT::i32 ? PPC::BCTR : PPC::BCTR8; 5653 Chain = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, Target, 5654 Chain), 0); 5655 CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain); 5656 return; 5657 } 5658 case PPCISD::TOC_ENTRY: { 5659 const bool isPPC64 = Subtarget->isPPC64(); 5660 const bool isELFABI = Subtarget->isSVR4ABI(); 5661 const bool isAIXABI = Subtarget->isAIXABI(); 5662 5663 // PowerPC only support small, medium and large code model. 5664 const CodeModel::Model CModel = TM.getCodeModel(); 5665 assert(!(CModel == CodeModel::Tiny || CModel == CodeModel::Kernel) && 5666 "PowerPC doesn't support tiny or kernel code models."); 5667 5668 if (isAIXABI && CModel == CodeModel::Medium) 5669 report_fatal_error("Medium code model is not supported on AIX."); 5670 5671 // For 64-bit small code model, we allow SelectCodeCommon to handle this, 5672 // selecting one of LDtoc, LDtocJTI, LDtocCPT, and LDtocBA. 5673 if (isPPC64 && CModel == CodeModel::Small) 5674 break; 5675 5676 // Handle 32-bit small code model. 5677 if (!isPPC64) { 5678 // Transforms the ISD::TOC_ENTRY node to passed in Opcode, either 5679 // PPC::ADDItoc, or PPC::LWZtoc 5680 auto replaceWith = [this, &dl](unsigned OpCode, SDNode *TocEntry) { 5681 SDValue GA = TocEntry->getOperand(0); 5682 SDValue TocBase = TocEntry->getOperand(1); 5683 SDNode *MN = CurDAG->getMachineNode(OpCode, dl, MVT::i32, GA, TocBase); 5684 transferMemOperands(TocEntry, MN); 5685 ReplaceNode(TocEntry, MN); 5686 }; 5687 5688 if (isELFABI) { 5689 assert(TM.isPositionIndependent() && 5690 "32-bit ELF can only have TOC entries in position independent" 5691 " code."); 5692 // 32-bit ELF always uses a small code model toc access. 5693 replaceWith(PPC::LWZtoc, N); 5694 return; 5695 } 5696 5697 if (isAIXABI && CModel == CodeModel::Small) { 5698 if (hasTocDataAttr(N->getOperand(0), 5699 CurDAG->getDataLayout().getPointerSize())) 5700 replaceWith(PPC::ADDItoc, N); 5701 else 5702 replaceWith(PPC::LWZtoc, N); 5703 5704 return; 5705 } 5706 } 5707 5708 assert(CModel != CodeModel::Small && "All small code models handled."); 5709 5710 assert((isPPC64 || (isAIXABI && !isPPC64)) && "We are dealing with 64-bit" 5711 " ELF/AIX or 32-bit AIX in the following."); 5712 5713 // Transforms the ISD::TOC_ENTRY node for 32-bit AIX large code model mode 5714 // or 64-bit medium (ELF-only) or large (ELF and AIX) code model code. We 5715 // generate two instructions as described below. 
The first source operand 5716 // is a symbol reference. If it must be toc-referenced according to 5717 // Subtarget, we generate: 5718 // [32-bit AIX] 5719 // LWZtocL(@sym, ADDIStocHA(%r2, @sym)) 5720 // [64-bit ELF/AIX] 5721 // LDtocL(@sym, ADDIStocHA8(%x2, @sym)) 5722 // Otherwise we generate: 5723 // ADDItocL(ADDIStocHA8(%x2, @sym), @sym) 5724 SDValue GA = N->getOperand(0); 5725 SDValue TOCbase = N->getOperand(1); 5726 5727 EVT VT = isPPC64 ? MVT::i64 : MVT::i32; 5728 SDNode *Tmp = CurDAG->getMachineNode( 5729 isPPC64 ? PPC::ADDIStocHA8 : PPC::ADDIStocHA, dl, VT, TOCbase, GA); 5730 5731 if (PPCLowering->isAccessedAsGotIndirect(GA)) { 5732 // If it is accessed as got-indirect, we need an extra LWZ/LD to load 5733 // the address. 5734 SDNode *MN = CurDAG->getMachineNode( 5735 isPPC64 ? PPC::LDtocL : PPC::LWZtocL, dl, VT, GA, SDValue(Tmp, 0)); 5736 5737 transferMemOperands(N, MN); 5738 ReplaceNode(N, MN); 5739 return; 5740 } 5741 5742 // Build the address relative to the TOC-pointer. 5743 ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64, 5744 SDValue(Tmp, 0), GA)); 5745 return; 5746 } 5747 case PPCISD::PPC32_PICGOT: 5748 // Generate a PIC-safe GOT reference. 5749 assert(Subtarget->is32BitELFABI() && 5750 "PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4"); 5751 CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT, 5752 PPCLowering->getPointerTy(CurDAG->getDataLayout()), 5753 MVT::i32); 5754 return; 5755 5756 case PPCISD::VADD_SPLAT: { 5757 // This expands into one of three sequences, depending on whether 5758 // the first operand is odd or even, positive or negative. 5759 assert(isa<ConstantSDNode>(N->getOperand(0)) && 5760 isa<ConstantSDNode>(N->getOperand(1)) && 5761 "Invalid operand on VADD_SPLAT!"); 5762 5763 int Elt = N->getConstantOperandVal(0); 5764 int EltSize = N->getConstantOperandVal(1); 5765 unsigned Opc1, Opc2, Opc3; 5766 EVT VT; 5767 5768 if (EltSize == 1) { 5769 Opc1 = PPC::VSPLTISB; 5770 Opc2 = PPC::VADDUBM; 5771 Opc3 = PPC::VSUBUBM; 5772 VT = MVT::v16i8; 5773 } else if (EltSize == 2) { 5774 Opc1 = PPC::VSPLTISH; 5775 Opc2 = PPC::VADDUHM; 5776 Opc3 = PPC::VSUBUHM; 5777 VT = MVT::v8i16; 5778 } else { 5779 assert(EltSize == 4 && "Invalid element size on VADD_SPLAT!"); 5780 Opc1 = PPC::VSPLTISW; 5781 Opc2 = PPC::VADDUWM; 5782 Opc3 = PPC::VSUBUWM; 5783 VT = MVT::v4i32; 5784 } 5785 5786 if ((Elt & 1) == 0) { 5787 // Elt is even, in the range [-32,-18] + [16,30]. 5788 // 5789 // Convert: VADD_SPLAT elt, size 5790 // Into: tmp = VSPLTIS[BHW] elt 5791 // VADDU[BHW]M tmp, tmp 5792 // Where: [BHW] = B for size = 1, H for size = 2, W for size = 4 5793 SDValue EltVal = getI32Imm(Elt >> 1, dl); 5794 SDNode *Tmp = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5795 SDValue TmpVal = SDValue(Tmp, 0); 5796 ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, TmpVal, TmpVal)); 5797 return; 5798 } else if (Elt > 0) { 5799 // Elt is odd and positive, in the range [17,31]. 5800 // 5801 // Convert: VADD_SPLAT elt, size 5802 // Into: tmp1 = VSPLTIS[BHW] elt-16 5803 // tmp2 = VSPLTIS[BHW] -16 5804 // VSUBU[BHW]M tmp1, tmp2 5805 SDValue EltVal = getI32Imm(Elt - 16, dl); 5806 SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5807 EltVal = getI32Imm(-16, dl); 5808 SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5809 ReplaceNode(N, CurDAG->getMachineNode(Opc3, dl, VT, SDValue(Tmp1, 0), 5810 SDValue(Tmp2, 0))); 5811 return; 5812 } else { 5813 // Elt is odd and negative, in the range [-31,-17]. 
5814 // 5815 // Convert: VADD_SPLAT elt, size 5816 // Into: tmp1 = VSPLTIS[BHW] elt+16 5817 // tmp2 = VSPLTIS[BHW] -16 5818 // VADDU[BHW]M tmp1, tmp2 5819 SDValue EltVal = getI32Imm(Elt + 16, dl); 5820 SDNode *Tmp1 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5821 EltVal = getI32Imm(-16, dl); 5822 SDNode *Tmp2 = CurDAG->getMachineNode(Opc1, dl, VT, EltVal); 5823 ReplaceNode(N, CurDAG->getMachineNode(Opc2, dl, VT, SDValue(Tmp1, 0), 5824 SDValue(Tmp2, 0))); 5825 return; 5826 } 5827 } 5828 } 5829 5830 SelectCode(N); 5831 } 5832 5833 // If the target supports the cmpb instruction, do the idiom recognition here. 5834 // We don't do this as a DAG combine because we don't want to do it as nodes 5835 // are being combined (because we might miss part of the eventual idiom). We 5836 // don't want to do it during instruction selection because we want to reuse 5837 // the logic for lowering the masking operations already part of the 5838 // instruction selector. 5839 SDValue PPCDAGToDAGISel::combineToCMPB(SDNode *N) { 5840 SDLoc dl(N); 5841 5842 assert(N->getOpcode() == ISD::OR && 5843 "Only OR nodes are supported for CMPB"); 5844 5845 SDValue Res; 5846 if (!Subtarget->hasCMPB()) 5847 return Res; 5848 5849 if (N->getValueType(0) != MVT::i32 && 5850 N->getValueType(0) != MVT::i64) 5851 return Res; 5852 5853 EVT VT = N->getValueType(0); 5854 5855 SDValue RHS, LHS; 5856 bool BytesFound[8] = {false, false, false, false, false, false, false, false}; 5857 uint64_t Mask = 0, Alt = 0; 5858 5859 auto IsByteSelectCC = [this](SDValue O, unsigned &b, 5860 uint64_t &Mask, uint64_t &Alt, 5861 SDValue &LHS, SDValue &RHS) { 5862 if (O.getOpcode() != ISD::SELECT_CC) 5863 return false; 5864 ISD::CondCode CC = cast<CondCodeSDNode>(O.getOperand(4))->get(); 5865 5866 if (!isa<ConstantSDNode>(O.getOperand(2)) || 5867 !isa<ConstantSDNode>(O.getOperand(3))) 5868 return false; 5869 5870 uint64_t PM = O.getConstantOperandVal(2); 5871 uint64_t PAlt = O.getConstantOperandVal(3); 5872 for (b = 0; b < 8; ++b) { 5873 uint64_t Mask = UINT64_C(0xFF) << (8*b); 5874 if (PM && (PM & Mask) == PM && (PAlt & Mask) == PAlt) 5875 break; 5876 } 5877 5878 if (b == 8) 5879 return false; 5880 Mask |= PM; 5881 Alt |= PAlt; 5882 5883 if (!isa<ConstantSDNode>(O.getOperand(1)) || 5884 O.getConstantOperandVal(1) != 0) { 5885 SDValue Op0 = O.getOperand(0), Op1 = O.getOperand(1); 5886 if (Op0.getOpcode() == ISD::TRUNCATE) 5887 Op0 = Op0.getOperand(0); 5888 if (Op1.getOpcode() == ISD::TRUNCATE) 5889 Op1 = Op1.getOperand(0); 5890 5891 if (Op0.getOpcode() == ISD::SRL && Op1.getOpcode() == ISD::SRL && 5892 Op0.getOperand(1) == Op1.getOperand(1) && CC == ISD::SETEQ && 5893 isa<ConstantSDNode>(Op0.getOperand(1))) { 5894 5895 unsigned Bits = Op0.getValueSizeInBits(); 5896 if (b != Bits/8-1) 5897 return false; 5898 if (Op0.getConstantOperandVal(1) != Bits-8) 5899 return false; 5900 5901 LHS = Op0.getOperand(0); 5902 RHS = Op1.getOperand(0); 5903 return true; 5904 } 5905 5906 // When we have small integers (i16 to be specific), the form present 5907 // post-legalization uses SETULT in the SELECT_CC for the 5908 // higher-order byte, depending on the fact that the 5909 // even-higher-order bytes are known to all be zero, for example: 5910 // select_cc (xor $lhs, $rhs), 256, 65280, 0, setult 5911 // (so when the second byte is the same, because all higher-order 5912 // bits from bytes 3 and 4 are known to be zero, the result of the 5913 // xor can be at most 255) 5914 if (Op0.getOpcode() == ISD::XOR && CC == ISD::SETULT && 5915 
isa<ConstantSDNode>(O.getOperand(1))) { 5916 5917 uint64_t ULim = O.getConstantOperandVal(1); 5918 if (ULim != (UINT64_C(1) << b*8)) 5919 return false; 5920 5921 // Now we need to make sure that the upper bytes are known to be 5922 // zero. 5923 unsigned Bits = Op0.getValueSizeInBits(); 5924 if (!CurDAG->MaskedValueIsZero( 5925 Op0, APInt::getHighBitsSet(Bits, Bits - (b + 1) * 8))) 5926 return false; 5927 5928 LHS = Op0.getOperand(0); 5929 RHS = Op0.getOperand(1); 5930 return true; 5931 } 5932 5933 return false; 5934 } 5935 5936 if (CC != ISD::SETEQ) 5937 return false; 5938 5939 SDValue Op = O.getOperand(0); 5940 if (Op.getOpcode() == ISD::AND) { 5941 if (!isa<ConstantSDNode>(Op.getOperand(1))) 5942 return false; 5943 if (Op.getConstantOperandVal(1) != (UINT64_C(0xFF) << (8*b))) 5944 return false; 5945 5946 SDValue XOR = Op.getOperand(0); 5947 if (XOR.getOpcode() == ISD::TRUNCATE) 5948 XOR = XOR.getOperand(0); 5949 if (XOR.getOpcode() != ISD::XOR) 5950 return false; 5951 5952 LHS = XOR.getOperand(0); 5953 RHS = XOR.getOperand(1); 5954 return true; 5955 } else if (Op.getOpcode() == ISD::SRL) { 5956 if (!isa<ConstantSDNode>(Op.getOperand(1))) 5957 return false; 5958 unsigned Bits = Op.getValueSizeInBits(); 5959 if (b != Bits/8-1) 5960 return false; 5961 if (Op.getConstantOperandVal(1) != Bits-8) 5962 return false; 5963 5964 SDValue XOR = Op.getOperand(0); 5965 if (XOR.getOpcode() == ISD::TRUNCATE) 5966 XOR = XOR.getOperand(0); 5967 if (XOR.getOpcode() != ISD::XOR) 5968 return false; 5969 5970 LHS = XOR.getOperand(0); 5971 RHS = XOR.getOperand(1); 5972 return true; 5973 } 5974 5975 return false; 5976 }; 5977 5978 SmallVector<SDValue, 8> Queue(1, SDValue(N, 0)); 5979 while (!Queue.empty()) { 5980 SDValue V = Queue.pop_back_val(); 5981 5982 for (const SDValue &O : V.getNode()->ops()) { 5983 unsigned b = 0; 5984 uint64_t M = 0, A = 0; 5985 SDValue OLHS, ORHS; 5986 if (O.getOpcode() == ISD::OR) { 5987 Queue.push_back(O); 5988 } else if (IsByteSelectCC(O, b, M, A, OLHS, ORHS)) { 5989 if (!LHS) { 5990 LHS = OLHS; 5991 RHS = ORHS; 5992 BytesFound[b] = true; 5993 Mask |= M; 5994 Alt |= A; 5995 } else if ((LHS == ORHS && RHS == OLHS) || 5996 (RHS == ORHS && LHS == OLHS)) { 5997 BytesFound[b] = true; 5998 Mask |= M; 5999 Alt |= A; 6000 } else { 6001 return Res; 6002 } 6003 } else { 6004 return Res; 6005 } 6006 } 6007 } 6008 6009 unsigned LastB = 0, BCnt = 0; 6010 for (unsigned i = 0; i < 8; ++i) 6011 if (BytesFound[LastB]) { 6012 ++BCnt; 6013 LastB = i; 6014 } 6015 6016 if (!LastB || BCnt < 2) 6017 return Res; 6018 6019 // Because we'll be zero-extending the output anyway if don't have a specific 6020 // value for each input byte (via the Mask), we can 'anyext' the inputs. 6021 if (LHS.getValueType() != VT) { 6022 LHS = CurDAG->getAnyExtOrTrunc(LHS, dl, VT); 6023 RHS = CurDAG->getAnyExtOrTrunc(RHS, dl, VT); 6024 } 6025 6026 Res = CurDAG->getNode(PPCISD::CMPB, dl, VT, LHS, RHS); 6027 6028 bool NonTrivialMask = ((int64_t) Mask) != INT64_C(-1); 6029 if (NonTrivialMask && !Alt) { 6030 // Res = Mask & CMPB 6031 Res = CurDAG->getNode(ISD::AND, dl, VT, Res, 6032 CurDAG->getConstant(Mask, dl, VT)); 6033 } else if (Alt) { 6034 // Res = (CMPB & Mask) | (~CMPB & Alt) 6035 // Which, as suggested here: 6036 // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge 6037 // can be written as: 6038 // Res = Alt ^ ((Alt ^ Mask) & CMPB) 6039 // useful because the (Alt ^ Mask) can be pre-computed. 
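    // In other words, each byte of the result comes from Mask where the
    // corresponding byte of CMPB is 0xff (the input bytes matched) and from
    // Alt where it is 0x00, which is exactly (CMPB & Mask) | (~CMPB & Alt).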
6040 Res = CurDAG->getNode(ISD::AND, dl, VT, Res, 6041 CurDAG->getConstant(Mask ^ Alt, dl, VT)); 6042 Res = CurDAG->getNode(ISD::XOR, dl, VT, Res, 6043 CurDAG->getConstant(Alt, dl, VT)); 6044 } 6045 6046 return Res; 6047 } 6048 6049 // When CR bit registers are enabled, an extension of an i1 variable to a i32 6050 // or i64 value is lowered in terms of a SELECT_I[48] operation, and thus 6051 // involves constant materialization of a 0 or a 1 or both. If the result of 6052 // the extension is then operated upon by some operator that can be constant 6053 // folded with a constant 0 or 1, and that constant can be materialized using 6054 // only one instruction (like a zero or one), then we should fold in those 6055 // operations with the select. 6056 void PPCDAGToDAGISel::foldBoolExts(SDValue &Res, SDNode *&N) { 6057 if (!Subtarget->useCRBits()) 6058 return; 6059 6060 if (N->getOpcode() != ISD::ZERO_EXTEND && 6061 N->getOpcode() != ISD::SIGN_EXTEND && 6062 N->getOpcode() != ISD::ANY_EXTEND) 6063 return; 6064 6065 if (N->getOperand(0).getValueType() != MVT::i1) 6066 return; 6067 6068 if (!N->hasOneUse()) 6069 return; 6070 6071 SDLoc dl(N); 6072 EVT VT = N->getValueType(0); 6073 SDValue Cond = N->getOperand(0); 6074 SDValue ConstTrue = 6075 CurDAG->getConstant(N->getOpcode() == ISD::SIGN_EXTEND ? -1 : 1, dl, VT); 6076 SDValue ConstFalse = CurDAG->getConstant(0, dl, VT); 6077 6078 do { 6079 SDNode *User = *N->use_begin(); 6080 if (User->getNumOperands() != 2) 6081 break; 6082 6083 auto TryFold = [this, N, User, dl](SDValue Val) { 6084 SDValue UserO0 = User->getOperand(0), UserO1 = User->getOperand(1); 6085 SDValue O0 = UserO0.getNode() == N ? Val : UserO0; 6086 SDValue O1 = UserO1.getNode() == N ? Val : UserO1; 6087 6088 return CurDAG->FoldConstantArithmetic(User->getOpcode(), dl, 6089 User->getValueType(0), {O0, O1}); 6090 }; 6091 6092 // FIXME: When the semantics of the interaction between select and undef 6093 // are clearly defined, it may turn out to be unnecessary to break here. 6094 SDValue TrueRes = TryFold(ConstTrue); 6095 if (!TrueRes || TrueRes.isUndef()) 6096 break; 6097 SDValue FalseRes = TryFold(ConstFalse); 6098 if (!FalseRes || FalseRes.isUndef()) 6099 break; 6100 6101 // For us to materialize these using one instruction, we must be able to 6102 // represent them as signed 16-bit integers. 6103 uint64_t True = cast<ConstantSDNode>(TrueRes)->getZExtValue(), 6104 False = cast<ConstantSDNode>(FalseRes)->getZExtValue(); 6105 if (!isInt<16>(True) || !isInt<16>(False)) 6106 break; 6107 6108 // We can replace User with a new SELECT node, and try again to see if we 6109 // can fold the select with its user. 
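    // For example, (add (zext i1 %c), 5) becomes (select %c, 6, 5); if that
    // select's only user can be folded the same way, the loop repeats with
    // the new true/false constants.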
6110 Res = CurDAG->getSelect(dl, User->getValueType(0), Cond, TrueRes, FalseRes); 6111 N = User; 6112 ConstTrue = TrueRes; 6113 ConstFalse = FalseRes; 6114 } while (N->hasOneUse()); 6115 } 6116 6117 void PPCDAGToDAGISel::PreprocessISelDAG() { 6118 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); 6119 6120 bool MadeChange = false; 6121 while (Position != CurDAG->allnodes_begin()) { 6122 SDNode *N = &*--Position; 6123 if (N->use_empty()) 6124 continue; 6125 6126 SDValue Res; 6127 switch (N->getOpcode()) { 6128 default: break; 6129 case ISD::OR: 6130 Res = combineToCMPB(N); 6131 break; 6132 } 6133 6134 if (!Res) 6135 foldBoolExts(Res, N); 6136 6137 if (Res) { 6138 LLVM_DEBUG(dbgs() << "PPC DAG preprocessing replacing:\nOld: "); 6139 LLVM_DEBUG(N->dump(CurDAG)); 6140 LLVM_DEBUG(dbgs() << "\nNew: "); 6141 LLVM_DEBUG(Res.getNode()->dump(CurDAG)); 6142 LLVM_DEBUG(dbgs() << "\n"); 6143 6144 CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Res); 6145 MadeChange = true; 6146 } 6147 } 6148 6149 if (MadeChange) 6150 CurDAG->RemoveDeadNodes(); 6151 } 6152 6153 /// PostprocessISelDAG - Perform some late peephole optimizations 6154 /// on the DAG representation. 6155 void PPCDAGToDAGISel::PostprocessISelDAG() { 6156 // Skip peepholes at -O0. 6157 if (TM.getOptLevel() == CodeGenOpt::None) 6158 return; 6159 6160 PeepholePPC64(); 6161 PeepholeCROps(); 6162 PeepholePPC64ZExt(); 6163 } 6164 6165 // Check if all users of this node will become isel where the second operand 6166 // is the constant zero. If this is so, and if we can negate the condition, 6167 // then we can flip the true and false operands. This will allow the zero to 6168 // be folded with the isel so that we don't need to materialize a register 6169 // containing zero. 6170 bool PPCDAGToDAGISel::AllUsersSelectZero(SDNode *N) { 6171 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 6172 UI != UE; ++UI) { 6173 SDNode *User = *UI; 6174 if (!User->isMachineOpcode()) 6175 return false; 6176 if (User->getMachineOpcode() != PPC::SELECT_I4 && 6177 User->getMachineOpcode() != PPC::SELECT_I8) 6178 return false; 6179 6180 SDNode *Op1 = User->getOperand(1).getNode(); 6181 SDNode *Op2 = User->getOperand(2).getNode(); 6182 // If we have a degenerate select with two equal operands, swapping will 6183 // not do anything, and we may run into an infinite loop. 
6184 if (Op1 == Op2) 6185 return false; 6186 6187 if (!Op2->isMachineOpcode()) 6188 return false; 6189 6190 if (Op2->getMachineOpcode() != PPC::LI && 6191 Op2->getMachineOpcode() != PPC::LI8) 6192 return false; 6193 6194 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op2->getOperand(0)); 6195 if (!C) 6196 return false; 6197 6198 if (!C->isZero()) 6199 return false; 6200 } 6201 6202 return true; 6203 } 6204 6205 void PPCDAGToDAGISel::SwapAllSelectUsers(SDNode *N) { 6206 SmallVector<SDNode *, 4> ToReplace; 6207 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end(); 6208 UI != UE; ++UI) { 6209 SDNode *User = *UI; 6210 assert((User->getMachineOpcode() == PPC::SELECT_I4 || 6211 User->getMachineOpcode() == PPC::SELECT_I8) && 6212 "Must have all select users"); 6213 ToReplace.push_back(User); 6214 } 6215 6216 for (SmallVector<SDNode *, 4>::iterator UI = ToReplace.begin(), 6217 UE = ToReplace.end(); UI != UE; ++UI) { 6218 SDNode *User = *UI; 6219 SDNode *ResNode = 6220 CurDAG->getMachineNode(User->getMachineOpcode(), SDLoc(User), 6221 User->getValueType(0), User->getOperand(0), 6222 User->getOperand(2), 6223 User->getOperand(1)); 6224 6225 LLVM_DEBUG(dbgs() << "CR Peephole replacing:\nOld: "); 6226 LLVM_DEBUG(User->dump(CurDAG)); 6227 LLVM_DEBUG(dbgs() << "\nNew: "); 6228 LLVM_DEBUG(ResNode->dump(CurDAG)); 6229 LLVM_DEBUG(dbgs() << "\n"); 6230 6231 ReplaceUses(User, ResNode); 6232 } 6233 } 6234 6235 void PPCDAGToDAGISel::PeepholeCROps() { 6236 bool IsModified; 6237 do { 6238 IsModified = false; 6239 for (SDNode &Node : CurDAG->allnodes()) { 6240 MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(&Node); 6241 if (!MachineNode || MachineNode->use_empty()) 6242 continue; 6243 SDNode *ResNode = MachineNode; 6244 6245 bool Op1Set = false, Op1Unset = false, 6246 Op1Not = false, 6247 Op2Set = false, Op2Unset = false, 6248 Op2Not = false; 6249 6250 unsigned Opcode = MachineNode->getMachineOpcode(); 6251 switch (Opcode) { 6252 default: break; 6253 case PPC::CRAND: 6254 case PPC::CRNAND: 6255 case PPC::CROR: 6256 case PPC::CRXOR: 6257 case PPC::CRNOR: 6258 case PPC::CREQV: 6259 case PPC::CRANDC: 6260 case PPC::CRORC: { 6261 SDValue Op = MachineNode->getOperand(1); 6262 if (Op.isMachineOpcode()) { 6263 if (Op.getMachineOpcode() == PPC::CRSET) 6264 Op2Set = true; 6265 else if (Op.getMachineOpcode() == PPC::CRUNSET) 6266 Op2Unset = true; 6267 else if (Op.getMachineOpcode() == PPC::CRNOR && 6268 Op.getOperand(0) == Op.getOperand(1)) 6269 Op2Not = true; 6270 } 6271 LLVM_FALLTHROUGH; 6272 } 6273 case PPC::BC: 6274 case PPC::BCn: 6275 case PPC::SELECT_I4: 6276 case PPC::SELECT_I8: 6277 case PPC::SELECT_F4: 6278 case PPC::SELECT_F8: 6279 case PPC::SELECT_SPE: 6280 case PPC::SELECT_SPE4: 6281 case PPC::SELECT_VRRC: 6282 case PPC::SELECT_VSFRC: 6283 case PPC::SELECT_VSSRC: 6284 case PPC::SELECT_VSRC: { 6285 SDValue Op = MachineNode->getOperand(0); 6286 if (Op.isMachineOpcode()) { 6287 if (Op.getMachineOpcode() == PPC::CRSET) 6288 Op1Set = true; 6289 else if (Op.getMachineOpcode() == PPC::CRUNSET) 6290 Op1Unset = true; 6291 else if (Op.getMachineOpcode() == PPC::CRNOR && 6292 Op.getOperand(0) == Op.getOperand(1)) 6293 Op1Not = true; 6294 } 6295 } 6296 break; 6297 } 6298 6299 bool SelectSwap = false; 6300 switch (Opcode) { 6301 default: break; 6302 case PPC::CRAND: 6303 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6304 // x & x = x 6305 ResNode = MachineNode->getOperand(0).getNode(); 6306 else if (Op1Set) 6307 // 1 & y = y 6308 ResNode = MachineNode->getOperand(1).getNode(); 6309 else if 
(Op2Set) 6310 // x & 1 = x 6311 ResNode = MachineNode->getOperand(0).getNode(); 6312 else if (Op1Unset || Op2Unset) 6313 // x & 0 = 0 & y = 0 6314 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6315 MVT::i1); 6316 else if (Op1Not) 6317 // ~x & y = andc(y, x) 6318 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6319 MVT::i1, MachineNode->getOperand(1), 6320 MachineNode->getOperand(0). 6321 getOperand(0)); 6322 else if (Op2Not) 6323 // x & ~y = andc(x, y) 6324 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6325 MVT::i1, MachineNode->getOperand(0), 6326 MachineNode->getOperand(1). 6327 getOperand(0)); 6328 else if (AllUsersSelectZero(MachineNode)) { 6329 ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode), 6330 MVT::i1, MachineNode->getOperand(0), 6331 MachineNode->getOperand(1)); 6332 SelectSwap = true; 6333 } 6334 break; 6335 case PPC::CRNAND: 6336 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6337 // nand(x, x) -> nor(x, x) 6338 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6339 MVT::i1, MachineNode->getOperand(0), 6340 MachineNode->getOperand(0)); 6341 else if (Op1Set) 6342 // nand(1, y) -> nor(y, y) 6343 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6344 MVT::i1, MachineNode->getOperand(1), 6345 MachineNode->getOperand(1)); 6346 else if (Op2Set) 6347 // nand(x, 1) -> nor(x, x) 6348 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6349 MVT::i1, MachineNode->getOperand(0), 6350 MachineNode->getOperand(0)); 6351 else if (Op1Unset || Op2Unset) 6352 // nand(x, 0) = nand(0, y) = 1 6353 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6354 MVT::i1); 6355 else if (Op1Not) 6356 // nand(~x, y) = ~(~x & y) = x | ~y = orc(x, y) 6357 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6358 MVT::i1, MachineNode->getOperand(0). 6359 getOperand(0), 6360 MachineNode->getOperand(1)); 6361 else if (Op2Not) 6362 // nand(x, ~y) = ~x | y = orc(y, x) 6363 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6364 MVT::i1, MachineNode->getOperand(1). 6365 getOperand(0), 6366 MachineNode->getOperand(0)); 6367 else if (AllUsersSelectZero(MachineNode)) { 6368 ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode), 6369 MVT::i1, MachineNode->getOperand(0), 6370 MachineNode->getOperand(1)); 6371 SelectSwap = true; 6372 } 6373 break; 6374 case PPC::CROR: 6375 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6376 // x | x = x 6377 ResNode = MachineNode->getOperand(0).getNode(); 6378 else if (Op1Set || Op2Set) 6379 // x | 1 = 1 | y = 1 6380 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6381 MVT::i1); 6382 else if (Op1Unset) 6383 // 0 | y = y 6384 ResNode = MachineNode->getOperand(1).getNode(); 6385 else if (Op2Unset) 6386 // x | 0 = x 6387 ResNode = MachineNode->getOperand(0).getNode(); 6388 else if (Op1Not) 6389 // ~x | y = orc(y, x) 6390 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6391 MVT::i1, MachineNode->getOperand(1), 6392 MachineNode->getOperand(0). 6393 getOperand(0)); 6394 else if (Op2Not) 6395 // x | ~y = orc(x, y) 6396 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6397 MVT::i1, MachineNode->getOperand(0), 6398 MachineNode->getOperand(1). 
6399 getOperand(0)); 6400 else if (AllUsersSelectZero(MachineNode)) { 6401 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6402 MVT::i1, MachineNode->getOperand(0), 6403 MachineNode->getOperand(1)); 6404 SelectSwap = true; 6405 } 6406 break; 6407 case PPC::CRXOR: 6408 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6409 // xor(x, x) = 0 6410 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6411 MVT::i1); 6412 else if (Op1Set) 6413 // xor(1, y) -> nor(y, y) 6414 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6415 MVT::i1, MachineNode->getOperand(1), 6416 MachineNode->getOperand(1)); 6417 else if (Op2Set) 6418 // xor(x, 1) -> nor(x, x) 6419 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6420 MVT::i1, MachineNode->getOperand(0), 6421 MachineNode->getOperand(0)); 6422 else if (Op1Unset) 6423 // xor(0, y) = y 6424 ResNode = MachineNode->getOperand(1).getNode(); 6425 else if (Op2Unset) 6426 // xor(x, 0) = x 6427 ResNode = MachineNode->getOperand(0).getNode(); 6428 else if (Op1Not) 6429 // xor(~x, y) = eqv(x, y) 6430 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode), 6431 MVT::i1, MachineNode->getOperand(0). 6432 getOperand(0), 6433 MachineNode->getOperand(1)); 6434 else if (Op2Not) 6435 // xor(x, ~y) = eqv(x, y) 6436 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode), 6437 MVT::i1, MachineNode->getOperand(0), 6438 MachineNode->getOperand(1). 6439 getOperand(0)); 6440 else if (AllUsersSelectZero(MachineNode)) { 6441 ResNode = CurDAG->getMachineNode(PPC::CREQV, SDLoc(MachineNode), 6442 MVT::i1, MachineNode->getOperand(0), 6443 MachineNode->getOperand(1)); 6444 SelectSwap = true; 6445 } 6446 break; 6447 case PPC::CRNOR: 6448 if (Op1Set || Op2Set) 6449 // nor(1, y) -> 0 6450 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6451 MVT::i1); 6452 else if (Op1Unset) 6453 // nor(0, y) = ~y -> nor(y, y) 6454 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6455 MVT::i1, MachineNode->getOperand(1), 6456 MachineNode->getOperand(1)); 6457 else if (Op2Unset) 6458 // nor(x, 0) = ~x 6459 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6460 MVT::i1, MachineNode->getOperand(0), 6461 MachineNode->getOperand(0)); 6462 else if (Op1Not) 6463 // nor(~x, y) = andc(x, y) 6464 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6465 MVT::i1, MachineNode->getOperand(0). 6466 getOperand(0), 6467 MachineNode->getOperand(1)); 6468 else if (Op2Not) 6469 // nor(x, ~y) = andc(y, x) 6470 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6471 MVT::i1, MachineNode->getOperand(1). 
6472 getOperand(0), 6473 MachineNode->getOperand(0)); 6474 else if (AllUsersSelectZero(MachineNode)) { 6475 ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode), 6476 MVT::i1, MachineNode->getOperand(0), 6477 MachineNode->getOperand(1)); 6478 SelectSwap = true; 6479 } 6480 break; 6481 case PPC::CREQV: 6482 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6483 // eqv(x, x) = 1 6484 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6485 MVT::i1); 6486 else if (Op1Set) 6487 // eqv(1, y) = y 6488 ResNode = MachineNode->getOperand(1).getNode(); 6489 else if (Op2Set) 6490 // eqv(x, 1) = x 6491 ResNode = MachineNode->getOperand(0).getNode(); 6492 else if (Op1Unset) 6493 // eqv(0, y) = ~y -> nor(y, y) 6494 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6495 MVT::i1, MachineNode->getOperand(1), 6496 MachineNode->getOperand(1)); 6497 else if (Op2Unset) 6498 // eqv(x, 0) = ~x 6499 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6500 MVT::i1, MachineNode->getOperand(0), 6501 MachineNode->getOperand(0)); 6502 else if (Op1Not) 6503 // eqv(~x, y) = xor(x, y) 6504 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode), 6505 MVT::i1, MachineNode->getOperand(0). 6506 getOperand(0), 6507 MachineNode->getOperand(1)); 6508 else if (Op2Not) 6509 // eqv(x, ~y) = xor(x, y) 6510 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode), 6511 MVT::i1, MachineNode->getOperand(0), 6512 MachineNode->getOperand(1). 6513 getOperand(0)); 6514 else if (AllUsersSelectZero(MachineNode)) { 6515 ResNode = CurDAG->getMachineNode(PPC::CRXOR, SDLoc(MachineNode), 6516 MVT::i1, MachineNode->getOperand(0), 6517 MachineNode->getOperand(1)); 6518 SelectSwap = true; 6519 } 6520 break; 6521 case PPC::CRANDC: 6522 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6523 // andc(x, x) = 0 6524 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6525 MVT::i1); 6526 else if (Op1Set) 6527 // andc(1, y) = ~y 6528 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6529 MVT::i1, MachineNode->getOperand(1), 6530 MachineNode->getOperand(1)); 6531 else if (Op1Unset || Op2Set) 6532 // andc(0, y) = andc(x, 1) = 0 6533 ResNode = CurDAG->getMachineNode(PPC::CRUNSET, SDLoc(MachineNode), 6534 MVT::i1); 6535 else if (Op2Unset) 6536 // andc(x, 0) = x 6537 ResNode = MachineNode->getOperand(0).getNode(); 6538 else if (Op1Not) 6539 // andc(~x, y) = ~(x | y) = nor(x, y) 6540 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6541 MVT::i1, MachineNode->getOperand(0). 6542 getOperand(0), 6543 MachineNode->getOperand(1)); 6544 else if (Op2Not) 6545 // andc(x, ~y) = x & y 6546 ResNode = CurDAG->getMachineNode(PPC::CRAND, SDLoc(MachineNode), 6547 MVT::i1, MachineNode->getOperand(0), 6548 MachineNode->getOperand(1). 
6549 getOperand(0)); 6550 else if (AllUsersSelectZero(MachineNode)) { 6551 ResNode = CurDAG->getMachineNode(PPC::CRORC, SDLoc(MachineNode), 6552 MVT::i1, MachineNode->getOperand(1), 6553 MachineNode->getOperand(0)); 6554 SelectSwap = true; 6555 } 6556 break; 6557 case PPC::CRORC: 6558 if (MachineNode->getOperand(0) == MachineNode->getOperand(1)) 6559 // orc(x, x) = 1 6560 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6561 MVT::i1); 6562 else if (Op1Set || Op2Unset) 6563 // orc(1, y) = orc(x, 0) = 1 6564 ResNode = CurDAG->getMachineNode(PPC::CRSET, SDLoc(MachineNode), 6565 MVT::i1); 6566 else if (Op2Set) 6567 // orc(x, 1) = x 6568 ResNode = MachineNode->getOperand(0).getNode(); 6569 else if (Op1Unset) 6570 // orc(0, y) = ~y 6571 ResNode = CurDAG->getMachineNode(PPC::CRNOR, SDLoc(MachineNode), 6572 MVT::i1, MachineNode->getOperand(1), 6573 MachineNode->getOperand(1)); 6574 else if (Op1Not) 6575 // orc(~x, y) = ~(x & y) = nand(x, y) 6576 ResNode = CurDAG->getMachineNode(PPC::CRNAND, SDLoc(MachineNode), 6577 MVT::i1, MachineNode->getOperand(0). 6578 getOperand(0), 6579 MachineNode->getOperand(1)); 6580 else if (Op2Not) 6581 // orc(x, ~y) = x | y 6582 ResNode = CurDAG->getMachineNode(PPC::CROR, SDLoc(MachineNode), 6583 MVT::i1, MachineNode->getOperand(0), 6584 MachineNode->getOperand(1). 6585 getOperand(0)); 6586 else if (AllUsersSelectZero(MachineNode)) { 6587 ResNode = CurDAG->getMachineNode(PPC::CRANDC, SDLoc(MachineNode), 6588 MVT::i1, MachineNode->getOperand(1), 6589 MachineNode->getOperand(0)); 6590 SelectSwap = true; 6591 } 6592 break; 6593 case PPC::SELECT_I4: 6594 case PPC::SELECT_I8: 6595 case PPC::SELECT_F4: 6596 case PPC::SELECT_F8: 6597 case PPC::SELECT_SPE: 6598 case PPC::SELECT_SPE4: 6599 case PPC::SELECT_VRRC: 6600 case PPC::SELECT_VSFRC: 6601 case PPC::SELECT_VSSRC: 6602 case PPC::SELECT_VSRC: 6603 if (Op1Set) 6604 ResNode = MachineNode->getOperand(1).getNode(); 6605 else if (Op1Unset) 6606 ResNode = MachineNode->getOperand(2).getNode(); 6607 else if (Op1Not) 6608 ResNode = CurDAG->getMachineNode(MachineNode->getMachineOpcode(), 6609 SDLoc(MachineNode), 6610 MachineNode->getValueType(0), 6611 MachineNode->getOperand(0). 6612 getOperand(0), 6613 MachineNode->getOperand(2), 6614 MachineNode->getOperand(1)); 6615 break; 6616 case PPC::BC: 6617 case PPC::BCn: 6618 if (Op1Not) 6619 ResNode = CurDAG->getMachineNode(Opcode == PPC::BC ? PPC::BCn : 6620 PPC::BC, 6621 SDLoc(MachineNode), 6622 MVT::Other, 6623 MachineNode->getOperand(0). 6624 getOperand(0), 6625 MachineNode->getOperand(1), 6626 MachineNode->getOperand(2)); 6627 // FIXME: Handle Op1Set, Op1Unset here too. 6628 break; 6629 } 6630 6631 // If we're inverting this node because it is used only by selects that 6632 // we'd like to swap, then swap the selects before the node replacement. 6633 if (SelectSwap) 6634 SwapAllSelectUsers(MachineNode); 6635 6636 if (ResNode != MachineNode) { 6637 LLVM_DEBUG(dbgs() << "CR Peephole replacing:\nOld: "); 6638 LLVM_DEBUG(MachineNode->dump(CurDAG)); 6639 LLVM_DEBUG(dbgs() << "\nNew: "); 6640 LLVM_DEBUG(ResNode->dump(CurDAG)); 6641 LLVM_DEBUG(dbgs() << "\n"); 6642 6643 ReplaceUses(MachineNode, ResNode); 6644 IsModified = true; 6645 } 6646 } 6647 if (IsModified) 6648 CurDAG->RemoveDeadNodes(); 6649 } while (IsModified); 6650 } 6651 6652 // Gather the set of 32-bit operations that are known to have their 6653 // higher-order 32 bits zero, where ToPromote contains all such operations. 
6654 static bool PeepholePPC64ZExtGather(SDValue Op32, 6655 SmallPtrSetImpl<SDNode *> &ToPromote) { 6656 if (!Op32.isMachineOpcode()) 6657 return false; 6658 6659 // First, check for the "frontier" instructions (those that will clear the 6660 // higher-order 32 bits. 6661 6662 // For RLWINM and RLWNM, we need to make sure that the mask does not wrap 6663 // around. If it does not, then these instructions will clear the 6664 // higher-order bits. 6665 if ((Op32.getMachineOpcode() == PPC::RLWINM || 6666 Op32.getMachineOpcode() == PPC::RLWNM) && 6667 Op32.getConstantOperandVal(2) <= Op32.getConstantOperandVal(3)) { 6668 ToPromote.insert(Op32.getNode()); 6669 return true; 6670 } 6671 6672 // SLW and SRW always clear the higher-order bits. 6673 if (Op32.getMachineOpcode() == PPC::SLW || 6674 Op32.getMachineOpcode() == PPC::SRW) { 6675 ToPromote.insert(Op32.getNode()); 6676 return true; 6677 } 6678 6679 // For LI and LIS, we need the immediate to be positive (so that it is not 6680 // sign extended). 6681 if (Op32.getMachineOpcode() == PPC::LI || 6682 Op32.getMachineOpcode() == PPC::LIS) { 6683 if (!isUInt<15>(Op32.getConstantOperandVal(0))) 6684 return false; 6685 6686 ToPromote.insert(Op32.getNode()); 6687 return true; 6688 } 6689 6690 // LHBRX and LWBRX always clear the higher-order bits. 6691 if (Op32.getMachineOpcode() == PPC::LHBRX || 6692 Op32.getMachineOpcode() == PPC::LWBRX) { 6693 ToPromote.insert(Op32.getNode()); 6694 return true; 6695 } 6696 6697 // CNT[LT]ZW always produce a 64-bit value in [0,32], and so is zero extended. 6698 if (Op32.getMachineOpcode() == PPC::CNTLZW || 6699 Op32.getMachineOpcode() == PPC::CNTTZW) { 6700 ToPromote.insert(Op32.getNode()); 6701 return true; 6702 } 6703 6704 // Next, check for those instructions we can look through. 6705 6706 // Assuming the mask does not wrap around, then the higher-order bits are 6707 // taken directly from the first operand. 6708 if (Op32.getMachineOpcode() == PPC::RLWIMI && 6709 Op32.getConstantOperandVal(3) <= Op32.getConstantOperandVal(4)) { 6710 SmallPtrSet<SDNode *, 16> ToPromote1; 6711 if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1)) 6712 return false; 6713 6714 ToPromote.insert(Op32.getNode()); 6715 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6716 return true; 6717 } 6718 6719 // For OR, the higher-order bits are zero if that is true for both operands. 6720 // For SELECT_I4, the same is true (but the relevant operand numbers are 6721 // shifted by 1). 6722 if (Op32.getMachineOpcode() == PPC::OR || 6723 Op32.getMachineOpcode() == PPC::SELECT_I4) { 6724 unsigned B = Op32.getMachineOpcode() == PPC::SELECT_I4 ? 1 : 0; 6725 SmallPtrSet<SDNode *, 16> ToPromote1; 6726 if (!PeepholePPC64ZExtGather(Op32.getOperand(B+0), ToPromote1)) 6727 return false; 6728 if (!PeepholePPC64ZExtGather(Op32.getOperand(B+1), ToPromote1)) 6729 return false; 6730 6731 ToPromote.insert(Op32.getNode()); 6732 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6733 return true; 6734 } 6735 6736 // For ORI and ORIS, we need the higher-order bits of the first operand to be 6737 // zero, and also for the constant to be positive (so that it is not sign 6738 // extended). 
6739 if (Op32.getMachineOpcode() == PPC::ORI || 6740 Op32.getMachineOpcode() == PPC::ORIS) { 6741 SmallPtrSet<SDNode *, 16> ToPromote1; 6742 if (!PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1)) 6743 return false; 6744 if (!isUInt<15>(Op32.getConstantOperandVal(1))) 6745 return false; 6746 6747 ToPromote.insert(Op32.getNode()); 6748 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6749 return true; 6750 } 6751 6752 // The higher-order bits of AND are zero if that is true for at least one of 6753 // the operands. 6754 if (Op32.getMachineOpcode() == PPC::AND) { 6755 SmallPtrSet<SDNode *, 16> ToPromote1, ToPromote2; 6756 bool Op0OK = 6757 PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1); 6758 bool Op1OK = 6759 PeepholePPC64ZExtGather(Op32.getOperand(1), ToPromote2); 6760 if (!Op0OK && !Op1OK) 6761 return false; 6762 6763 ToPromote.insert(Op32.getNode()); 6764 6765 if (Op0OK) 6766 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6767 6768 if (Op1OK) 6769 ToPromote.insert(ToPromote2.begin(), ToPromote2.end()); 6770 6771 return true; 6772 } 6773 6774 // For ANDI and ANDIS, the higher-order bits are zero if either that is true 6775 // of the first operand, or if the second operand is positive (so that it is 6776 // not sign extended). 6777 if (Op32.getMachineOpcode() == PPC::ANDI_rec || 6778 Op32.getMachineOpcode() == PPC::ANDIS_rec) { 6779 SmallPtrSet<SDNode *, 16> ToPromote1; 6780 bool Op0OK = 6781 PeepholePPC64ZExtGather(Op32.getOperand(0), ToPromote1); 6782 bool Op1OK = isUInt<15>(Op32.getConstantOperandVal(1)); 6783 if (!Op0OK && !Op1OK) 6784 return false; 6785 6786 ToPromote.insert(Op32.getNode()); 6787 6788 if (Op0OK) 6789 ToPromote.insert(ToPromote1.begin(), ToPromote1.end()); 6790 6791 return true; 6792 } 6793 6794 return false; 6795 } 6796 6797 void PPCDAGToDAGISel::PeepholePPC64ZExt() { 6798 if (!Subtarget->isPPC64()) 6799 return; 6800 6801 // When we zero-extend from i32 to i64, we use a pattern like this: 6802 // def : Pat<(i64 (zext i32:$in)), 6803 // (RLDICL (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $in, sub_32), 6804 // 0, 32)>; 6805 // There are several 32-bit shift/rotate instructions, however, that will 6806 // clear the higher-order bits of their output, rendering the RLDICL 6807 // unnecessary. When that happens, we remove it here, and redefine the 6808 // relevant 32-bit operation to be a 64-bit operation. 6809 6810 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); 6811 6812 bool MadeChange = false; 6813 while (Position != CurDAG->allnodes_begin()) { 6814 SDNode *N = &*--Position; 6815 // Skip dead nodes and any non-machine opcodes. 6816 if (N->use_empty() || !N->isMachineOpcode()) 6817 continue; 6818 6819 if (N->getMachineOpcode() != PPC::RLDICL) 6820 continue; 6821 6822 if (N->getConstantOperandVal(1) != 0 || 6823 N->getConstantOperandVal(2) != 32) 6824 continue; 6825 6826 SDValue ISR = N->getOperand(0); 6827 if (!ISR.isMachineOpcode() || 6828 ISR.getMachineOpcode() != TargetOpcode::INSERT_SUBREG) 6829 continue; 6830 6831 if (!ISR.hasOneUse()) 6832 continue; 6833 6834 if (ISR.getConstantOperandVal(2) != PPC::sub_32) 6835 continue; 6836 6837 SDValue IDef = ISR.getOperand(0); 6838 if (!IDef.isMachineOpcode() || 6839 IDef.getMachineOpcode() != TargetOpcode::IMPLICIT_DEF) 6840 continue; 6841 6842 // We now know that we're looking at a canonical i32 -> i64 zext. See if we 6843 // can get rid of it. 
6844 6845 SDValue Op32 = ISR->getOperand(1); 6846 if (!Op32.isMachineOpcode()) 6847 continue; 6848 6849 // There are some 32-bit instructions that always clear the high-order 32 6850 // bits, there are also some instructions (like AND) that we can look 6851 // through. 6852 SmallPtrSet<SDNode *, 16> ToPromote; 6853 if (!PeepholePPC64ZExtGather(Op32, ToPromote)) 6854 continue; 6855 6856 // If the ToPromote set contains nodes that have uses outside of the set 6857 // (except for the original INSERT_SUBREG), then abort the transformation. 6858 bool OutsideUse = false; 6859 for (SDNode *PN : ToPromote) { 6860 for (SDNode *UN : PN->uses()) { 6861 if (!ToPromote.count(UN) && UN != ISR.getNode()) { 6862 OutsideUse = true; 6863 break; 6864 } 6865 } 6866 6867 if (OutsideUse) 6868 break; 6869 } 6870 if (OutsideUse) 6871 continue; 6872 6873 MadeChange = true; 6874 6875 // We now know that this zero extension can be removed by promoting to 6876 // nodes in ToPromote to 64-bit operations, where for operations in the 6877 // frontier of the set, we need to insert INSERT_SUBREGs for their 6878 // operands. 6879 for (SDNode *PN : ToPromote) { 6880 unsigned NewOpcode; 6881 switch (PN->getMachineOpcode()) { 6882 default: 6883 llvm_unreachable("Don't know the 64-bit variant of this instruction"); 6884 case PPC::RLWINM: NewOpcode = PPC::RLWINM8; break; 6885 case PPC::RLWNM: NewOpcode = PPC::RLWNM8; break; 6886 case PPC::SLW: NewOpcode = PPC::SLW8; break; 6887 case PPC::SRW: NewOpcode = PPC::SRW8; break; 6888 case PPC::LI: NewOpcode = PPC::LI8; break; 6889 case PPC::LIS: NewOpcode = PPC::LIS8; break; 6890 case PPC::LHBRX: NewOpcode = PPC::LHBRX8; break; 6891 case PPC::LWBRX: NewOpcode = PPC::LWBRX8; break; 6892 case PPC::CNTLZW: NewOpcode = PPC::CNTLZW8; break; 6893 case PPC::CNTTZW: NewOpcode = PPC::CNTTZW8; break; 6894 case PPC::RLWIMI: NewOpcode = PPC::RLWIMI8; break; 6895 case PPC::OR: NewOpcode = PPC::OR8; break; 6896 case PPC::SELECT_I4: NewOpcode = PPC::SELECT_I8; break; 6897 case PPC::ORI: NewOpcode = PPC::ORI8; break; 6898 case PPC::ORIS: NewOpcode = PPC::ORIS8; break; 6899 case PPC::AND: NewOpcode = PPC::AND8; break; 6900 case PPC::ANDI_rec: 6901 NewOpcode = PPC::ANDI8_rec; 6902 break; 6903 case PPC::ANDIS_rec: 6904 NewOpcode = PPC::ANDIS8_rec; 6905 break; 6906 } 6907 6908 // Note: During the replacement process, the nodes will be in an 6909 // inconsistent state (some instructions will have operands with values 6910 // of the wrong type). Once done, however, everything should be right 6911 // again. 6912 6913 SmallVector<SDValue, 4> Ops; 6914 for (const SDValue &V : PN->ops()) { 6915 if (!ToPromote.count(V.getNode()) && V.getValueType() == MVT::i32 && 6916 !isa<ConstantSDNode>(V)) { 6917 SDValue ReplOpOps[] = { ISR.getOperand(0), V, ISR.getOperand(2) }; 6918 SDNode *ReplOp = 6919 CurDAG->getMachineNode(TargetOpcode::INSERT_SUBREG, SDLoc(V), 6920 ISR.getNode()->getVTList(), ReplOpOps); 6921 Ops.push_back(SDValue(ReplOp, 0)); 6922 } else { 6923 Ops.push_back(V); 6924 } 6925 } 6926 6927 // Because all to-be-promoted nodes only have users that are other 6928 // promoted nodes (or the original INSERT_SUBREG), we can safely replace 6929 // the i32 result value type with i64. 
6930 6931 SmallVector<EVT, 2> NewVTs; 6932 SDVTList VTs = PN->getVTList(); 6933 for (unsigned i = 0, ie = VTs.NumVTs; i != ie; ++i) 6934 if (VTs.VTs[i] == MVT::i32) 6935 NewVTs.push_back(MVT::i64); 6936 else 6937 NewVTs.push_back(VTs.VTs[i]); 6938 6939 LLVM_DEBUG(dbgs() << "PPC64 ZExt Peephole morphing:\nOld: "); 6940 LLVM_DEBUG(PN->dump(CurDAG)); 6941 6942 CurDAG->SelectNodeTo(PN, NewOpcode, CurDAG->getVTList(NewVTs), Ops); 6943 6944 LLVM_DEBUG(dbgs() << "\nNew: "); 6945 LLVM_DEBUG(PN->dump(CurDAG)); 6946 LLVM_DEBUG(dbgs() << "\n"); 6947 } 6948 6949 // Now we replace the original zero extend and its associated INSERT_SUBREG 6950 // with the value feeding the INSERT_SUBREG (which has now been promoted to 6951 // return an i64). 6952 6953 LLVM_DEBUG(dbgs() << "PPC64 ZExt Peephole replacing:\nOld: "); 6954 LLVM_DEBUG(N->dump(CurDAG)); 6955 LLVM_DEBUG(dbgs() << "\nNew: "); 6956 LLVM_DEBUG(Op32.getNode()->dump(CurDAG)); 6957 LLVM_DEBUG(dbgs() << "\n"); 6958 6959 ReplaceUses(N, Op32.getNode()); 6960 } 6961 6962 if (MadeChange) 6963 CurDAG->RemoveDeadNodes(); 6964 } 6965 6966 static bool isVSXSwap(SDValue N) { 6967 if (!N->isMachineOpcode()) 6968 return false; 6969 unsigned Opc = N->getMachineOpcode(); 6970 6971 // Single-operand XXPERMDI or the regular XXPERMDI/XXSLDWI where the immediate 6972 // operand is 2. 6973 if (Opc == PPC::XXPERMDIs) { 6974 return isa<ConstantSDNode>(N->getOperand(1)) && 6975 N->getConstantOperandVal(1) == 2; 6976 } else if (Opc == PPC::XXPERMDI || Opc == PPC::XXSLDWI) { 6977 return N->getOperand(0) == N->getOperand(1) && 6978 isa<ConstantSDNode>(N->getOperand(2)) && 6979 N->getConstantOperandVal(2) == 2; 6980 } 6981 6982 return false; 6983 } 6984 6985 // TODO: Make this complete and replace with a table-gen bit. 6986 static bool isLaneInsensitive(SDValue N) { 6987 if (!N->isMachineOpcode()) 6988 return false; 6989 unsigned Opc = N->getMachineOpcode(); 6990 6991 switch (Opc) { 6992 default: 6993 return false; 6994 case PPC::VAVGSB: 6995 case PPC::VAVGUB: 6996 case PPC::VAVGSH: 6997 case PPC::VAVGUH: 6998 case PPC::VAVGSW: 6999 case PPC::VAVGUW: 7000 case PPC::VMAXFP: 7001 case PPC::VMAXSB: 7002 case PPC::VMAXUB: 7003 case PPC::VMAXSH: 7004 case PPC::VMAXUH: 7005 case PPC::VMAXSW: 7006 case PPC::VMAXUW: 7007 case PPC::VMINFP: 7008 case PPC::VMINSB: 7009 case PPC::VMINUB: 7010 case PPC::VMINSH: 7011 case PPC::VMINUH: 7012 case PPC::VMINSW: 7013 case PPC::VMINUW: 7014 case PPC::VADDFP: 7015 case PPC::VADDUBM: 7016 case PPC::VADDUHM: 7017 case PPC::VADDUWM: 7018 case PPC::VSUBFP: 7019 case PPC::VSUBUBM: 7020 case PPC::VSUBUHM: 7021 case PPC::VSUBUWM: 7022 case PPC::VAND: 7023 case PPC::VANDC: 7024 case PPC::VOR: 7025 case PPC::VORC: 7026 case PPC::VXOR: 7027 case PPC::VNOR: 7028 case PPC::VMULUWM: 7029 return true; 7030 } 7031 } 7032 7033 // Try to simplify (xxswap (vec-op (xxswap) (xxswap))) where vec-op is 7034 // lane-insensitive. 7035 static void reduceVSXSwap(SDNode *N, SelectionDAG *DAG) { 7036 // Our desired xxswap might be source of COPY_TO_REGCLASS. 7037 // TODO: Can we put this a common method for DAG? 7038 auto SkipRCCopy = [](SDValue V) { 7039 while (V->isMachineOpcode() && 7040 V->getMachineOpcode() == TargetOpcode::COPY_TO_REGCLASS) { 7041 // All values in the chain should have single use. 7042 if (V->use_empty() || !V->use_begin()->isOnlyUserOf(V.getNode())) 7043 return SDValue(); 7044 V = V->getOperand(0); 7045 } 7046 return V.hasOneUse() ? 
V : SDValue(); 7047 }; 7048 7049 SDValue VecOp = SkipRCCopy(N->getOperand(0)); 7050 if (!VecOp || !isLaneInsensitive(VecOp)) 7051 return; 7052 7053 SDValue LHS = SkipRCCopy(VecOp.getOperand(0)), 7054 RHS = SkipRCCopy(VecOp.getOperand(1)); 7055 if (!LHS || !RHS || !isVSXSwap(LHS) || !isVSXSwap(RHS)) 7056 return; 7057 7058 // These swaps may still have chain-uses here, count on dead code elimination 7059 // in following passes to remove them. 7060 DAG->ReplaceAllUsesOfValueWith(LHS, LHS.getOperand(0)); 7061 DAG->ReplaceAllUsesOfValueWith(RHS, RHS.getOperand(0)); 7062 DAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), N->getOperand(0)); 7063 } 7064 7065 void PPCDAGToDAGISel::PeepholePPC64() { 7066 SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end(); 7067 7068 while (Position != CurDAG->allnodes_begin()) { 7069 SDNode *N = &*--Position; 7070 // Skip dead nodes and any non-machine opcodes. 7071 if (N->use_empty() || !N->isMachineOpcode()) 7072 continue; 7073 7074 if (isVSXSwap(SDValue(N, 0))) 7075 reduceVSXSwap(N, CurDAG); 7076 7077 unsigned FirstOp; 7078 unsigned StorageOpcode = N->getMachineOpcode(); 7079 bool RequiresMod4Offset = false; 7080 7081 switch (StorageOpcode) { 7082 default: continue; 7083 7084 case PPC::LWA: 7085 case PPC::LD: 7086 case PPC::DFLOADf64: 7087 case PPC::DFLOADf32: 7088 RequiresMod4Offset = true; 7089 LLVM_FALLTHROUGH; 7090 case PPC::LBZ: 7091 case PPC::LBZ8: 7092 case PPC::LFD: 7093 case PPC::LFS: 7094 case PPC::LHA: 7095 case PPC::LHA8: 7096 case PPC::LHZ: 7097 case PPC::LHZ8: 7098 case PPC::LWZ: 7099 case PPC::LWZ8: 7100 FirstOp = 0; 7101 break; 7102 7103 case PPC::STD: 7104 case PPC::DFSTOREf64: 7105 case PPC::DFSTOREf32: 7106 RequiresMod4Offset = true; 7107 LLVM_FALLTHROUGH; 7108 case PPC::STB: 7109 case PPC::STB8: 7110 case PPC::STFD: 7111 case PPC::STFS: 7112 case PPC::STH: 7113 case PPC::STH8: 7114 case PPC::STW: 7115 case PPC::STW8: 7116 FirstOp = 1; 7117 break; 7118 } 7119 7120 // If this is a load or store with a zero offset, or within the alignment, 7121 // we may be able to fold an add-immediate into the memory operation. 7122 // The check against alignment is below, as it can't occur until we check 7123 // the arguments to N 7124 if (!isa<ConstantSDNode>(N->getOperand(FirstOp))) 7125 continue; 7126 7127 SDValue Base = N->getOperand(FirstOp + 1); 7128 if (!Base.isMachineOpcode()) 7129 continue; 7130 7131 unsigned Flags = 0; 7132 bool ReplaceFlags = true; 7133 7134 // When the feeding operation is an add-immediate of some sort, 7135 // determine whether we need to add relocation information to the 7136 // target flags on the immediate operand when we fold it into the 7137 // load instruction. 7138 // 7139 // For something like ADDItocL, the relocation information is 7140 // inferred from the opcode; when we process it in the AsmPrinter, 7141 // we add the necessary relocation there. A load, though, can receive 7142 // relocation from various flavors of ADDIxxx, so we need to carry 7143 // the relocation information in the target flags. 7144 switch (Base.getMachineOpcode()) { 7145 default: continue; 7146 7147 case PPC::ADDI8: 7148 case PPC::ADDI: 7149 // In some cases (such as TLS) the relocation information 7150 // is already in place on the operand, so copying the operand 7151 // is sufficient. 7152 ReplaceFlags = false; 7153 // For these cases, the immediate may not be divisible by 4, in 7154 // which case the fold is illegal for DS-form instructions. (The 7155 // other cases provide aligned addresses and are always safe.) 
7156 if (RequiresMod4Offset && 7157 (!isa<ConstantSDNode>(Base.getOperand(1)) || 7158 Base.getConstantOperandVal(1) % 4 != 0)) 7159 continue; 7160 break; 7161 case PPC::ADDIdtprelL: 7162 Flags = PPCII::MO_DTPREL_LO; 7163 break; 7164 case PPC::ADDItlsldL: 7165 Flags = PPCII::MO_TLSLD_LO; 7166 break; 7167 case PPC::ADDItocL: 7168 Flags = PPCII::MO_TOC_LO; 7169 break; 7170 } 7171 7172 SDValue ImmOpnd = Base.getOperand(1); 7173 7174 // On PPC64, the TOC base pointer is guaranteed by the ABI only to have 7175 // 8-byte alignment, and so we can only use offsets less than 8 (otherwise, 7176 // we might have needed different @ha relocation values for the offset 7177 // pointers). 7178 int MaxDisplacement = 7; 7179 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) { 7180 const GlobalValue *GV = GA->getGlobal(); 7181 Align Alignment = GV->getPointerAlignment(CurDAG->getDataLayout()); 7182 MaxDisplacement = std::min((int)Alignment.value() - 1, MaxDisplacement); 7183 } 7184 7185 bool UpdateHBase = false; 7186 SDValue HBase = Base.getOperand(0); 7187 7188 int Offset = N->getConstantOperandVal(FirstOp); 7189 if (ReplaceFlags) { 7190 if (Offset < 0 || Offset > MaxDisplacement) { 7191 // If we have a addi(toc@l)/addis(toc@ha) pair, and the addis has only 7192 // one use, then we can do this for any offset, we just need to also 7193 // update the offset (i.e. the symbol addend) on the addis also. 7194 if (Base.getMachineOpcode() != PPC::ADDItocL) 7195 continue; 7196 7197 if (!HBase.isMachineOpcode() || 7198 HBase.getMachineOpcode() != PPC::ADDIStocHA8) 7199 continue; 7200 7201 if (!Base.hasOneUse() || !HBase.hasOneUse()) 7202 continue; 7203 7204 SDValue HImmOpnd = HBase.getOperand(1); 7205 if (HImmOpnd != ImmOpnd) 7206 continue; 7207 7208 UpdateHBase = true; 7209 } 7210 } else { 7211 // If we're directly folding the addend from an addi instruction, then: 7212 // 1. In general, the offset on the memory access must be zero. 7213 // 2. If the addend is a constant, then it can be combined with a 7214 // non-zero offset, but only if the result meets the encoding 7215 // requirements. 7216 if (auto *C = dyn_cast<ConstantSDNode>(ImmOpnd)) { 7217 Offset += C->getSExtValue(); 7218 7219 if (RequiresMod4Offset && (Offset % 4) != 0) 7220 continue; 7221 7222 if (!isInt<16>(Offset)) 7223 continue; 7224 7225 ImmOpnd = CurDAG->getTargetConstant(Offset, SDLoc(ImmOpnd), 7226 ImmOpnd.getValueType()); 7227 } else if (Offset != 0) { 7228 continue; 7229 } 7230 } 7231 7232 // We found an opportunity. Reverse the operands from the add 7233 // immediate and substitute them into the load or store. If 7234 // needed, update the target flags for the immediate operand to 7235 // reflect the necessary relocation information. 7236 LLVM_DEBUG(dbgs() << "Folding add-immediate into mem-op:\nBase: "); 7237 LLVM_DEBUG(Base->dump(CurDAG)); 7238 LLVM_DEBUG(dbgs() << "\nN: "); 7239 LLVM_DEBUG(N->dump(CurDAG)); 7240 LLVM_DEBUG(dbgs() << "\n"); 7241 7242 // If the relocation information isn't already present on the 7243 // immediate operand, add it now. 7244 if (ReplaceFlags) { 7245 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(ImmOpnd)) { 7246 SDLoc dl(GA); 7247 const GlobalValue *GV = GA->getGlobal(); 7248 Align Alignment = GV->getPointerAlignment(CurDAG->getDataLayout()); 7249 // We can't perform this optimization for data whose alignment 7250 // is insufficient for the instruction encoding. 
7251 if (Alignment < 4 && (RequiresMod4Offset || (Offset % 4) != 0)) { 7252 LLVM_DEBUG(dbgs() << "Rejected this candidate for alignment.\n\n"); 7253 continue; 7254 } 7255 ImmOpnd = CurDAG->getTargetGlobalAddress(GV, dl, MVT::i64, Offset, Flags); 7256 } else if (ConstantPoolSDNode *CP = 7257 dyn_cast<ConstantPoolSDNode>(ImmOpnd)) { 7258 const Constant *C = CP->getConstVal(); 7259 ImmOpnd = CurDAG->getTargetConstantPool(C, MVT::i64, CP->getAlign(), 7260 Offset, Flags); 7261 } 7262 } 7263 7264 if (FirstOp == 1) // Store 7265 (void)CurDAG->UpdateNodeOperands(N, N->getOperand(0), ImmOpnd, 7266 Base.getOperand(0), N->getOperand(3)); 7267 else // Load 7268 (void)CurDAG->UpdateNodeOperands(N, ImmOpnd, Base.getOperand(0), 7269 N->getOperand(2)); 7270 7271 if (UpdateHBase) 7272 (void)CurDAG->UpdateNodeOperands(HBase.getNode(), HBase.getOperand(0), 7273 ImmOpnd); 7274 7275 // The add-immediate may now be dead, in which case remove it. 7276 if (Base.getNode()->use_empty()) 7277 CurDAG->RemoveDeadNode(Base.getNode()); 7278 } 7279 } 7280 7281 /// createPPCISelDag - This pass converts a legalized DAG into a 7282 /// PowerPC-specific DAG, ready for instruction scheduling. 7283 /// 7284 FunctionPass *llvm::createPPCISelDag(PPCTargetMachine &TM, 7285 CodeGenOpt::Level OptLevel) { 7286 return new PPCDAGToDAGISel(TM, OptLevel); 7287 } 7288