//===-- TargetLowering.cpp - Implement the TargetLowering class -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements the TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
using namespace llvm;

/// NOTE: The TargetMachine owns TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm)
    : TargetLoweringBase(tm) {}

const char *TargetLowering::getTargetNodeName(unsigned Opcode) const {
  return nullptr;
}

bool TargetLowering::isPositionIndependent() const {
  return getTargetMachine().isPositionIndependent();
}

/// Check whether a given call node is in tail position within its function. If
/// so, it sets Chain to the input chain of the tail call.
bool TargetLowering::isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                                          SDValue &Chain) const {
  const Function &F = DAG.getMachineFunction().getFunction();

  // First, check if tail calls have been disabled in this function.
  if (F.getFnAttribute("disable-tail-calls").getValueAsString() == "true")
    return false;

  // Conservatively require the attributes of the call to match those of
  // the return. Ignore NoAlias and NonNull because they don't affect the
  // call sequence.
  AttributeList CallerAttrs = F.getAttributes();
  if (AttrBuilder(CallerAttrs, AttributeList::ReturnIndex)
          .removeAttribute(Attribute::NoAlias)
          .removeAttribute(Attribute::NonNull)
          .hasAttributes())
    return false;

  // It's not safe to eliminate the sign / zero extension of the return value.
  if (CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::ZExt) ||
      CallerAttrs.hasAttribute(AttributeList::ReturnIndex, Attribute::SExt))
    return false;

  // Check if the only use is a function return node.
  return isUsedByReturnOnly(Node, Chain);
}

bool TargetLowering::parametersInCSRMatch(const MachineRegisterInfo &MRI,
    const uint32_t *CallerPreservedMask,
    const SmallVectorImpl<CCValAssign> &ArgLocs,
    const SmallVectorImpl<SDValue> &OutVals) const {
  for (unsigned I = 0, E = ArgLocs.size(); I != E; ++I) {
    const CCValAssign &ArgLoc = ArgLocs[I];
    if (!ArgLoc.isRegLoc())
      continue;
    MCRegister Reg = ArgLoc.getLocReg();
    // Only look at callee saved registers.
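    // (clobbersPhysReg() is true when the caller's preserved-register mask
    // does not preserve Reg, i.e. Reg is not callee-saved, so those registers
    // are skipped here.)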
    if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
      continue;
    // Check that we pass the value used for the caller.
    // (We look for a CopyFromReg reading a virtual register that is used
    //  for the function live-in value of register Reg)
    SDValue Value = OutVals[I];
    if (Value->getOpcode() != ISD::CopyFromReg)
      return false;
    MCRegister ArgReg = cast<RegisterSDNode>(Value->getOperand(1))->getReg();
    if (MRI.getLiveInPhysReg(ArgReg) != Reg)
      return false;
  }
  return true;
}

/// Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
                                                     unsigned ArgIdx) {
  IsSExt = Call->paramHasAttr(ArgIdx, Attribute::SExt);
  IsZExt = Call->paramHasAttr(ArgIdx, Attribute::ZExt);
  IsInReg = Call->paramHasAttr(ArgIdx, Attribute::InReg);
  IsSRet = Call->paramHasAttr(ArgIdx, Attribute::StructRet);
  IsNest = Call->paramHasAttr(ArgIdx, Attribute::Nest);
  IsByVal = Call->paramHasAttr(ArgIdx, Attribute::ByVal);
  IsPreallocated = Call->paramHasAttr(ArgIdx, Attribute::Preallocated);
  IsInAlloca = Call->paramHasAttr(ArgIdx, Attribute::InAlloca);
  IsReturned = Call->paramHasAttr(ArgIdx, Attribute::Returned);
  IsSwiftSelf = Call->paramHasAttr(ArgIdx, Attribute::SwiftSelf);
  IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
  Alignment = Call->getParamAlign(ArgIdx);
  ByValType = nullptr;
  if (IsByVal)
    ByValType = Call->getParamByValType(ArgIdx);
  PreallocatedType = nullptr;
  if (IsPreallocated)
    PreallocatedType = Call->getParamPreallocatedType(ArgIdx);
}

/// Generate a libcall taking the given operands as arguments and returning a
/// result of type RetVT.
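/// The call is chained onto InChain (or onto the DAG entry node when no chain
/// is supplied) and the result value is returned together with the output
/// chain.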
std::pair<SDValue, SDValue>
TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                            ArrayRef<SDValue> Ops,
                            MakeLibCallOptions CallOptions,
                            const SDLoc &dl,
                            SDValue InChain) const {
  if (!InChain)
    InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  Args.reserve(Ops.size());

  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    SDValue NewOp = Ops[i];
    Entry.Node = NewOp;
    Entry.Ty = Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
    Entry.IsSExt = shouldSignExtendTypeInLibCall(NewOp.getValueType(),
                                                 CallOptions.IsSExt);
    Entry.IsZExt = !Entry.IsSExt;

    if (CallOptions.IsSoften &&
        !shouldExtendTypeInLibCall(CallOptions.OpsVTBeforeSoften[i])) {
      Entry.IsSExt = Entry.IsZExt = false;
    }
    Args.push_back(Entry);
  }

  if (LC == RTLIB::UNKNOWN_LIBCALL)
    report_fatal_error("Unsupported library call operation!");
  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy(DAG.getDataLayout()));

  Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
  TargetLowering::CallLoweringInfo CLI(DAG);
  bool signExtend = shouldSignExtendTypeInLibCall(RetVT, CallOptions.IsSExt);
  bool zeroExtend = !signExtend;

  if (CallOptions.IsSoften &&
      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
    signExtend = zeroExtend = false;
  }

  CLI.setDebugLoc(dl)
      .setChain(InChain)
      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
      .setNoReturn(CallOptions.DoesNotReturn)
      .setDiscardResult(!CallOptions.IsReturnValueUsed)
      .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
      .setSExtResult(signExtend)
      .setZExtResult(zeroExtend);
  return LowerCallTo(CLI);
}

bool TargetLowering::findOptimalMemOpLowering(
    std::vector<EVT> &MemOps, unsigned Limit, const MemOp &Op, unsigned DstAS,
    unsigned SrcAS, const AttributeList &FuncAttributes) const {
  if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
    return false;

  EVT VT = getOptimalMemOpType(Op, FuncAttributes);

  if (VT == MVT::Other) {
    // Use the largest integer type whose alignment constraints are satisfied.
    // We only need to check DstAlign here as SrcAlign is always greater or
    // equal to DstAlign (or zero).
    VT = MVT::i64;
    if (Op.isFixedDstAlign())
      while (
          Op.getDstAlign() < (VT.getSizeInBits() / 8) &&
          !allowsMisalignedMemoryAccesses(VT, DstAS, Op.getDstAlign().value()))
        VT = (MVT::SimpleValueType)(VT.getSimpleVT().SimpleTy - 1);
    assert(VT.isInteger());

    // Find the largest legal integer type.
    MVT LVT = MVT::i64;
    while (!isTypeLegal(LVT))
      LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
    assert(LVT.isInteger());

    // If the type we've chosen is larger than the largest legal integer type
    // then use that instead.
    if (VT.bitsGT(LVT))
      VT = LVT;
  }

  unsigned NumMemOps = 0;
  uint64_t Size = Op.size();
  while (Size) {
    unsigned VTSize = VT.getSizeInBits() / 8;
    while (VTSize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
      EVT NewVT = VT;
      unsigned NewVTSize;

      bool Found = false;
      if (VT.isVector() || VT.isFloatingPoint()) {
        NewVT = (VT.getSizeInBits() > 64) ?
                    MVT::i64 : MVT::i32;
        if (isOperationLegalOrCustom(ISD::STORE, NewVT) &&
            isSafeMemOpType(NewVT.getSimpleVT()))
          Found = true;
        else if (NewVT == MVT::i64 &&
                 isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
                 isSafeMemOpType(MVT::f64)) {
          // i64 is usually not legal on 32-bit targets, but f64 may be.
          NewVT = MVT::f64;
          Found = true;
        }
      }

      if (!Found) {
        do {
          NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
          if (NewVT == MVT::i8)
            break;
        } while (!isSafeMemOpType(NewVT.getSimpleVT()));
      }
      NewVTSize = NewVT.getSizeInBits() / 8;

      // If the new VT cannot cover all of the remaining bits, then consider
      // issuing a (or a pair of) unaligned and overlapping load / store.
      bool Fast;
      if (NumMemOps && Op.allowOverlap() && NewVTSize < Size &&
          allowsMisalignedMemoryAccesses(
              VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
              MachineMemOperand::MONone, &Fast) &&
          Fast)
        VTSize = Size;
      else {
        VT = NewVT;
        VTSize = NewVTSize;
      }
    }

    if (++NumMemOps > Limit)
      return false;

    MemOps.push_back(VT);
    Size -= VTSize;
  }

  return true;
}

/// Soften the operands of a comparison. This code is shared among BR_CC,
/// SELECT_CC, and SETCC handlers.
void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS) const {
  SDValue Chain;
  return softenSetCCOperands(DAG, VT, NewLHS, NewRHS, CCCode, dl, OldLHS,
                             OldRHS, Chain);
}

void TargetLowering::softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                                         SDValue &NewLHS, SDValue &NewRHS,
                                         ISD::CondCode &CCCode,
                                         const SDLoc &dl, const SDValue OldLHS,
                                         const SDValue OldRHS,
                                         SDValue &Chain,
                                         bool IsSignaling) const {
  // FIXME: Currently we cannot really respect all IEEE predicates due to libgcc
  // not supporting it. We can update this code when libgcc provides such
  // functions.

  assert((VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f128 || VT == MVT::ppcf128)
         && "Unsupported setcc type!");

  // Expand into one or more soft-fp libcall(s).
  RTLIB::Libcall LC1 = RTLIB::UNKNOWN_LIBCALL, LC2 = RTLIB::UNKNOWN_LIBCALL;
  bool ShouldInvertCC = false;
  switch (CCCode) {
  case ISD::SETEQ:
  case ISD::SETOEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    LC1 = (VT == MVT::f32) ? RTLIB::UNE_F32 :
          (VT == MVT::f64) ? RTLIB::UNE_F64 :
          (VT == MVT::f128) ? RTLIB::UNE_F128 : RTLIB::UNE_PPCF128;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
          (VT == MVT::f64) ? RTLIB::OGE_F64 :
          (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
    break;
  case ISD::SETLT:
  case ISD::SETOLT:
    LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
          (VT == MVT::f64) ? RTLIB::OLT_F64 :
          (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
    break;
  case ISD::SETLE:
  case ISD::SETOLE:
    LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
          (VT == MVT::f64) ? RTLIB::OLE_F64 :
          (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
          (VT == MVT::f64) ?
                              RTLIB::OGT_F64 :
          (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
    break;
  case ISD::SETO:
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUO:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    break;
  case ISD::SETONE:
    // SETONE = O && UNE
    ShouldInvertCC = true;
    LLVM_FALLTHROUGH;
  case ISD::SETUEQ:
    LC1 = (VT == MVT::f32) ? RTLIB::UO_F32 :
          (VT == MVT::f64) ? RTLIB::UO_F64 :
          (VT == MVT::f128) ? RTLIB::UO_F128 : RTLIB::UO_PPCF128;
    LC2 = (VT == MVT::f32) ? RTLIB::OEQ_F32 :
          (VT == MVT::f64) ? RTLIB::OEQ_F64 :
          (VT == MVT::f128) ? RTLIB::OEQ_F128 : RTLIB::OEQ_PPCF128;
    break;
  default:
    // Invert CC for unordered comparisons.
    ShouldInvertCC = true;
    switch (CCCode) {
    case ISD::SETULT:
      LC1 = (VT == MVT::f32) ? RTLIB::OGE_F32 :
            (VT == MVT::f64) ? RTLIB::OGE_F64 :
            (VT == MVT::f128) ? RTLIB::OGE_F128 : RTLIB::OGE_PPCF128;
      break;
    case ISD::SETULE:
      LC1 = (VT == MVT::f32) ? RTLIB::OGT_F32 :
            (VT == MVT::f64) ? RTLIB::OGT_F64 :
            (VT == MVT::f128) ? RTLIB::OGT_F128 : RTLIB::OGT_PPCF128;
      break;
    case ISD::SETUGT:
      LC1 = (VT == MVT::f32) ? RTLIB::OLE_F32 :
            (VT == MVT::f64) ? RTLIB::OLE_F64 :
            (VT == MVT::f128) ? RTLIB::OLE_F128 : RTLIB::OLE_PPCF128;
      break;
    case ISD::SETUGE:
      LC1 = (VT == MVT::f32) ? RTLIB::OLT_F32 :
            (VT == MVT::f64) ? RTLIB::OLT_F64 :
            (VT == MVT::f128) ? RTLIB::OLT_F128 : RTLIB::OLT_PPCF128;
      break;
    default: llvm_unreachable("Do not know how to soften this setcc!");
    }
  }

  // Use the target specific return value for comparison lib calls.
  EVT RetVT = getCmpLibcallReturnType();
  SDValue Ops[2] = {NewLHS, NewRHS};
  TargetLowering::MakeLibCallOptions CallOptions;
  EVT OpsVT[2] = { OldLHS.getValueType(),
                   OldRHS.getValueType() };
  CallOptions.setTypeListBeforeSoften(OpsVT, RetVT, true);
  auto Call = makeLibCall(DAG, LC1, RetVT, Ops, CallOptions, dl, Chain);
  NewLHS = Call.first;
  NewRHS = DAG.getConstant(0, dl, RetVT);

  CCCode = getCmpLibcallCC(LC1);
  if (ShouldInvertCC) {
    assert(RetVT.isInteger());
    CCCode = getSetCCInverse(CCCode, RetVT);
  }

  if (LC2 == RTLIB::UNKNOWN_LIBCALL) {
    // Update Chain.
    Chain = Call.second;
  } else {
    EVT SetCCVT =
        getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), RetVT);
    SDValue Tmp = DAG.getSetCC(dl, SetCCVT, NewLHS, NewRHS, CCCode);
    auto Call2 = makeLibCall(DAG, LC2, RetVT, Ops, CallOptions, dl, Chain);
    CCCode = getCmpLibcallCC(LC2);
    if (ShouldInvertCC)
      CCCode = getSetCCInverse(CCCode, RetVT);
    NewLHS = DAG.getSetCC(dl, SetCCVT, Call2.first, NewRHS, CCCode);
    if (Chain)
      Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Call.second,
                          Call2.second);
    NewLHS = DAG.getNode(ShouldInvertCC ? ISD::AND : ISD::OR, dl,
                         Tmp.getValueType(), Tmp, NewLHS);
    NewRHS = SDValue();
  }
}

/// Return the entry encoding for a jump table in the current function. The
/// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
unsigned TargetLowering::getJumpTableEncoding() const {
  // In non-pic modes, just use the address of a block.
  if (!isPositionIndependent())
    return MachineJumpTableInfo::EK_BlockAddress;

  // In PIC mode, if the target supports a GPRel32 directive, use it.
  if (getTargetMachine().getMCAsmInfo()->getGPRel32Directive() != nullptr)
    return MachineJumpTableInfo::EK_GPRel32BlockAddress;

  // Otherwise, use a label difference.
  return MachineJumpTableInfo::EK_LabelDifference32;
}

SDValue TargetLowering::getPICJumpTableRelocBase(SDValue Table,
                                                 SelectionDAG &DAG) const {
  // If our PIC model is GP relative, use the global offset table as the base.
  unsigned JTEncoding = getJumpTableEncoding();

  if ((JTEncoding == MachineJumpTableInfo::EK_GPRel64BlockAddress) ||
      (JTEncoding == MachineJumpTableInfo::EK_GPRel32BlockAddress))
    return DAG.getGLOBAL_OFFSET_TABLE(getPointerTy(DAG.getDataLayout()));

  return Table;
}

/// This returns the relocation base for the given PIC jumptable, the same as
/// getPICJumpTableRelocBase, but as an MCExpr.
const MCExpr *
TargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                                             unsigned JTI, MCContext &Ctx) const {
  // The normal PIC reloc base is the label at the start of the jump table.
  return MCSymbolRefExpr::create(MF->getJTISymbol(JTI, Ctx), Ctx);
}

bool
TargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  const TargetMachine &TM = getTargetMachine();
  const GlobalValue *GV = GA->getGlobal();

  // If the address is not even local to this DSO we will have to load it from
  // a GOT and then add the offset.
  if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
    return false;

  // If the code is position independent we will have to add a base register.
  if (isPositionIndependent())
    return false;

  // Otherwise we can do it.
  return true;
}

//===----------------------------------------------------------------------===//
//  Optimization Methods
//===----------------------------------------------------------------------===//

/// If the specified instruction has a constant integer operand and there are
/// bits set in that constant that are not demanded, then clear those bits and
/// return true.
bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            const APInt &DemandedElts,
                                            TargetLoweringOpt &TLO) const {
  SDLoc DL(Op);
  unsigned Opcode = Op.getOpcode();

  // Do target-specific constant optimization.
  if (targetShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
    return TLO.New.getNode();

  // FIXME: ISD::SELECT, ISD::SELECT_CC
  switch (Opcode) {
  default:
    break;
  case ISD::XOR:
  case ISD::AND:
  case ISD::OR: {
    auto *Op1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    if (!Op1C)
      return false;

    // If this is a 'not' op, don't touch it because that's a canonical form.
    const APInt &C = Op1C->getAPIntValue();
    if (Opcode == ISD::XOR && DemandedBits.isSubsetOf(C))
      return false;

    if (!C.isSubsetOf(DemandedBits)) {
      EVT VT = Op.getValueType();
      SDValue NewC = TLO.DAG.getConstant(DemandedBits & C, DL, VT);
      SDValue NewOp = TLO.DAG.getNode(Opcode, DL, VT, Op.getOperand(0), NewC);
      return TLO.CombineTo(Op, NewOp);
    }

    break;
  }
  }

  return false;
}

bool TargetLowering::ShrinkDemandedConstant(SDValue Op,
                                            const APInt &DemandedBits,
                                            TargetLoweringOpt &TLO) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ?
                             APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO);
}

/// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free.
/// This uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
/// generalized for targets with other types of implicit widening casts.
bool TargetLowering::ShrinkDemandedOp(SDValue Op, unsigned BitWidth,
                                      const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const {
  assert(Op.getNumOperands() == 2 &&
         "ShrinkDemandedOp only supports binary operators!");
  assert(Op.getNode()->getNumValues() == 1 &&
         "ShrinkDemandedOp only supports nodes with one result!");

  SelectionDAG &DAG = TLO.DAG;
  SDLoc dl(Op);

  // Early return, as this function cannot handle vector types.
  if (Op.getValueType().isVector())
    return false;

  // Don't do this if the node has another user, which may require the
  // full value.
  if (!Op.getNode()->hasOneUse())
    return false;

  // Search for the smallest integer type with free casts to and from
  // Op's type. For expedience, just check power-of-2 integer types.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned DemandedSize = Demanded.getActiveBits();
  unsigned SmallVTBits = DemandedSize;
  if (!isPowerOf2_32(SmallVTBits))
    SmallVTBits = NextPowerOf2(SmallVTBits);
  for (; SmallVTBits < BitWidth; SmallVTBits = NextPowerOf2(SmallVTBits)) {
    EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), SmallVTBits);
    if (TLI.isTruncateFree(Op.getValueType(), SmallVT) &&
        TLI.isZExtFree(SmallVT, Op.getValueType())) {
      // We found a type with free casts.
      SDValue X = DAG.getNode(
          Op.getOpcode(), dl, SmallVT,
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(0)),
          DAG.getNode(ISD::TRUNCATE, dl, SmallVT, Op.getOperand(1)));
      assert(DemandedSize <= SmallVTBits && "Narrowed below demanded bits?");
      SDValue Z = DAG.getNode(ISD::ANY_EXTEND, dl, Op.getValueType(), X);
      return TLO.CombineTo(Op, Z);
    }
  }
  return false;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                        !DCI.isBeforeLegalizeOps());
  KnownBits Known;

  bool Simplified = SimplifyDemandedBits(Op, DemandedBits, Known, TLO);
  if (Simplified) {
    DCI.AddToWorklist(Op.getNode());
    DCI.CommitTargetLoweringOpt(TLO);
  }
  return Simplified;
}

bool TargetLowering::SimplifyDemandedBits(SDValue Op, const APInt &DemandedBits,
                                          KnownBits &Known,
                                          TargetLoweringOpt &TLO,
                                          unsigned Depth,
                                          bool AssumeSingleUse) const {
  EVT VT = Op.getValueType();

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (VT.isScalableVector()) {
    // Pretend we don't know anything for now.
    Known = KnownBits(DemandedBits.getBitWidth());
    return false;
  }

  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyDemandedBits(Op, DemandedBits, DemandedElts, Known, TLO, Depth,
                              AssumeSingleUse);
}

// TODO: Can we merge SelectionDAG::GetDemandedBits into this?
// TODO: Under what circumstances can we create nodes? Constant folding?
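// Try to find a simpler value that already provides all of the bits/elements
// demanded from Op (e.g. one of Op's operands, possibly bitcast), so callers
// with multiple uses of Op can bypass it. Returns an empty SDValue if no such
// value is found.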
SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  // Limit search depth.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  // Ignore UNDEFs.
  if (Op.isUndef())
    return SDValue();

  // Not demanding any bits/elts from Op.
  if (DemandedBits == 0 || DemandedElts == 0)
    return DAG.getUNDEF(Op.getValueType());

  unsigned NumElts = DemandedElts.getBitWidth();
  unsigned BitWidth = DemandedBits.getBitWidth();
  KnownBits LHSKnown, RHSKnown;
  switch (Op.getOpcode()) {
  case ISD::BITCAST: {
    SDValue Src = peekThroughBitcasts(Op.getOperand(0));
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (SrcVT == DstVT)
      return Src;

    unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
    unsigned NumDstEltBits = DstVT.getScalarSizeInBits();
    if (NumSrcEltBits == NumDstEltBits)
      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedBits, DemandedElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);

    // TODO - bigendian once we have test coverage.
    if (SrcVT.isVector() && (NumDstEltBits % NumSrcEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumDstEltBits / NumSrcEltBits;
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != Scale; ++i) {
        unsigned Offset = i * NumSrcEltBits;
        APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset);
        if (!Sub.isNullValue()) {
          DemandedSrcBits |= Sub;
          for (unsigned j = 0; j != NumElts; ++j)
            if (DemandedElts[j])
              DemandedSrcElts.setBit((j * Scale) + i);
        }
      }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    // TODO - bigendian once we have test coverage.
    if ((NumSrcEltBits % NumDstEltBits) == 0 &&
        DAG.getDataLayout().isLittleEndian()) {
      unsigned Scale = NumSrcEltBits / NumDstEltBits;
      unsigned NumSrcElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1;
      APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits);
      APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts);
      for (unsigned i = 0; i != NumElts; ++i)
        if (DemandedElts[i]) {
          unsigned Offset = (i % Scale) * NumDstEltBits;
          DemandedSrcBits.insertBits(DemandedBits, Offset);
          DemandedSrcElts.setBit(i / Scale);
        }

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Src, DemandedSrcBits, DemandedSrcElts, DAG, Depth + 1))
        return DAG.getBitcast(DstVT, V);
    }

    break;
  }
  case ISD::AND: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known 1 on one side, return the other.
    // These bits cannot contribute to the result of the 'and' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.Zero | RHSKnown.One))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.One))
      return Op.getOperand(1);
    break;
  }
  case ISD::OR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other. These bits cannot contribute to the result of the 'or' in this
    // context.
    if (DemandedBits.isSubsetOf(LHSKnown.One | RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(RHSKnown.One | LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::XOR: {
    LHSKnown = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
    RHSKnown = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);

    // If all of the demanded bits are known zero on one side, return the
    // other.
    if (DemandedBits.isSubsetOf(RHSKnown.Zero))
      return Op.getOperand(0);
    if (DemandedBits.isSubsetOf(LHSKnown.Zero))
      return Op.getOperand(1);
    break;
  }
  case ISD::SHL: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      SDValue Op0 = Op.getOperand(0);
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return Op0;
    }
    break;
  }
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return Op0;
    }
    break;
  }
  case ISD::SIGN_EXTEND_INREG: {
    // If none of the extended bits are demanded, eliminate the sextinreg.
    SDValue Op0 = Op.getOperand(0);
    EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
    unsigned ExBits = ExVT.getScalarSizeInBits();
    if (DemandedBits.getActiveBits() <= ExBits)
      return Op0;
    // If the input is already sign extended, just drop the extension.
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    if (NumSignBits >= (BitWidth - ExBits + 1))
      return Op0;
    break;
  }
  case ISD::ANY_EXTEND_VECTOR_INREG:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
  case ISD::ZERO_EXTEND_VECTOR_INREG: {
    // If we only want the lowest element and none of the extended bits, then
    // we can return the bitcasted source vector.
    SDValue Src = Op.getOperand(0);
    EVT SrcVT = Src.getValueType();
    EVT DstVT = Op.getValueType();
    if (DemandedElts == 1 && DstVT.getSizeInBits() == SrcVT.getSizeInBits() &&
        DAG.getDataLayout().isLittleEndian() &&
        DemandedBits.getActiveBits() <= SrcVT.getScalarSizeInBits()) {
      return DAG.getBitcast(DstVT, Src);
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case ISD::INSERT_SUBVECTOR: {
    // If we don't demand the inserted subvector, return the base vector.
    SDValue Vec = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    if (DemandedElts.extractBits(NumSubElts, Idx) == 0)
      return Vec;
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    bool AllUndef = true, IdentityLHS = true, IdentityRHS = true;
    for (unsigned i = 0; i != NumElts; ++i) {
      int M = ShuffleMask[i];
      if (M < 0 || !DemandedElts[i])
        continue;
      AllUndef = false;
      IdentityLHS &= (M == (int)i);
      IdentityRHS &= ((M - NumElts) == i);
    }

    if (AllUndef)
      return DAG.getUNDEF(Op.getValueType());
    if (IdentityLHS)
      return Op.getOperand(0);
    if (IdentityRHS)
      return Op.getOperand(1);
    break;
  }
  default:
    if (Op.getOpcode() >= ISD::BUILTIN_OP_END)
      if (SDValue V = SimplifyMultipleUseDemandedBitsForTargetNode(
              Op, DemandedBits, DemandedElts, DAG, Depth))
        return V;
    break;
  }
  return SDValue();
}

SDValue TargetLowering::SimplifyMultipleUseDemandedBits(
    SDValue Op, const APInt &DemandedBits, SelectionDAG &DAG,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  APInt DemandedElts = VT.isVector()
                           ? APInt::getAllOnesValue(VT.getVectorNumElements())
                           : APInt(1, 1);
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

SDValue TargetLowering::SimplifyMultipleUseDemandedVectorElts(
    SDValue Op, const APInt &DemandedElts, SelectionDAG &DAG,
    unsigned Depth) const {
  APInt DemandedBits = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
  return SimplifyMultipleUseDemandedBits(Op, DemandedBits, DemandedElts, DAG,
                                         Depth);
}

/// Look at Op. At this point, we know that only the OriginalDemandedBits of the
/// result of Op are ever used downstream. If we can use this information to
/// simplify Op, create a new simplified DAG node and return true, returning the
/// original and new nodes in Old and New.
/// Otherwise, analyze the expression and return a mask of Known bits for the
/// expression (used to simplify the caller). The Known bits may only be
/// accurate for those bits in the OriginalDemandedBits and
/// OriginalDemandedElts.
bool TargetLowering::SimplifyDemandedBits(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth, bool AssumeSingleUse) const {
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  assert(Op.getScalarValueSizeInBits() == BitWidth &&
         "Mask size mismatches value type size!");

  // Don't know anything.
  Known = KnownBits(BitWidth);

  // TODO: We can probably do more work on calculating the known bits and
  // simplifying the operations for scalable vectors, but for now we just
  // bail out.
  if (Op.getValueType().isScalableVector())
    return false;

  unsigned NumElts = OriginalDemandedElts.getBitWidth();
  assert((!Op.getValueType().isVector() ||
          NumElts == Op.getValueType().getVectorNumElements()) &&
         "Unexpected vector size");

  APInt DemandedBits = OriginalDemandedBits;
  APInt DemandedElts = OriginalDemandedElts;
  SDLoc dl(Op);
  auto &DL = TLO.DAG.getDataLayout();

  // Undef operand.
  if (Op.isUndef())
    return false;

  if (Op.getOpcode() == ISD::Constant) {
    // We know all of the bits for a constant!
    Known.One = cast<ConstantSDNode>(Op)->getAPIntValue();
    Known.Zero = ~Known.One;
    return false;
  }

  // Other users may use these bits.
  EVT VT = Op.getValueType();
  if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) {
    if (Depth != 0) {
      // If not at the root, just compute the Known bits to
      // simplify things downstream.
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false;
    }
    // If this is the root being simplified, allow it to have multiple uses,
    // just set the DemandedBits/Elts to all bits.
    DemandedBits = APInt::getAllOnesValue(BitWidth);
    DemandedElts = APInt::getAllOnesValue(NumElts);
  } else if (OriginalDemandedBits == 0 || OriginalDemandedElts == 0) {
    // Not demanding any bits/elts from Op.
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  } else if (Depth >= SelectionDAG::MaxRecursionDepth) {
    // Limit search depth.
    return false;
  }

  KnownBits Known2;
  switch (Op.getOpcode()) {
  case ISD::TargetConstant:
    llvm_unreachable("Can't simplify this node");
  case ISD::SCALAR_TO_VECTOR: {
    if (!DemandedElts[0])
      return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));

    KnownBits SrcKnown;
    SDValue Src = Op.getOperand(0);
    unsigned SrcBitWidth = Src.getScalarValueSizeInBits();
    APInt SrcDemandedBits = DemandedBits.zextOrSelf(SrcBitWidth);
    if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcKnown, TLO, Depth + 1))
      return true;

    // Upper elements are undef, so only get the knownbits if we just demand
    // the bottom element.
    if (DemandedElts == 1)
      Known = SrcKnown.anyextOrTrunc(BitWidth);
    break;
  }
  case ISD::BUILD_VECTOR:
    // Collect the known bits that are shared by every demanded element.
    // TODO: Call SimplifyDemandedBits for non-constant demanded elements.
    Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
    return false; // Don't fall through, will infinitely loop.
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    if (getTargetConstantFromLoad(LD)) {
      Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth);
      return false; // Don't fall through, will infinitely loop.
    } else if (ISD::isZEXTLoad(Op.getNode()) && Op.getResNo() == 0) {
      // If this is a ZEXTLoad and we are looking at the loaded value.
      EVT MemVT = LD->getMemoryVT();
      unsigned MemBits = MemVT.getScalarSizeInBits();
      Known.Zero.setBitsFrom(MemBits);
      return false; // Don't fall through, will infinitely loop.
    }
    break;
  }
  case ISD::INSERT_VECTOR_ELT: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    EVT VecVT = Vec.getValueType();

    // If index isn't constant, assume we need all vector elements AND the
    // inserted element.
    APInt DemandedVecElts(DemandedElts);
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      DemandedVecElts.clearBit(Idx);

      // Inserted element is not required.
      if (!DemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);
    }

    KnownBits KnownScl;
    unsigned NumSclBits = Scl.getScalarValueSizeInBits();
    APInt DemandedSclBits = DemandedBits.zextOrTrunc(NumSclBits);
    if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
      return true;

    Known = KnownScl.anyextOrTrunc(BitWidth);

    KnownBits KnownVec;
    if (SimplifyDemandedBits(Vec, DemandedBits, DemandedVecElts, KnownVec, TLO,
                             Depth + 1))
      return true;

    if (!!DemandedVecElts) {
      Known.One &= KnownVec.One;
      Known.Zero &= KnownVec.Zero;
    }

    return false;
  }
  case ISD::INSERT_SUBVECTOR: {
    // Demand any elements from the subvector and the remainder from the src
    // it's inserted into.
    SDValue Src = Op.getOperand(0);
    SDValue Sub = Op.getOperand(1);
    uint64_t Idx = Op.getConstantOperandVal(2);
    unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
    APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx);
    APInt DemandedSrcElts = DemandedElts;
    DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx);

    KnownBits KnownSub, KnownSrc;
    if (SimplifyDemandedBits(Sub, DemandedBits, DemandedSubElts, KnownSub, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    Known.Zero.setAllBits();
    Known.One.setAllBits();
    if (!!DemandedSubElts) {
      Known.One &= KnownSub.One;
      Known.Zero &= KnownSub.Zero;
    }
    if (!!DemandedSrcElts) {
      Known.One &= KnownSrc.One;
      Known.Zero &= KnownSrc.Zero;
    }

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSubElts.isAllOnesValue() ||
        !DemandedSrcElts.isAllOnesValue()) {
      SDValue NewSub = SimplifyMultipleUseDemandedBits(
          Sub, DemandedBits, DemandedSubElts, TLO.DAG, Depth + 1);
      SDValue NewSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (NewSub || NewSrc) {
        NewSub = NewSub ? NewSub : Sub;
        NewSrc = NewSrc ?
                          NewSrc : Src;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc, NewSub,
                                        Op.getOperand(2));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::EXTRACT_SUBVECTOR: {
    // Offset the demanded elts by the subvector index.
    SDValue Src = Op.getOperand(0);
    if (Src.getValueType().isScalableVector())
      break;
    uint64_t Idx = Op.getConstantOperandVal(1);
    unsigned NumSrcElts = Src.getValueType().getVectorNumElements();
    APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);

    if (SimplifyDemandedBits(Src, DemandedBits, DemandedSrcElts, Known, TLO,
                             Depth + 1))
      return true;

    // Attempt to avoid multi-use src if we don't need anything from it.
    if (!DemandedBits.isAllOnesValue() || !DemandedSrcElts.isAllOnesValue()) {
      SDValue DemandedSrc = SimplifyMultipleUseDemandedBits(
          Src, DemandedBits, DemandedSrcElts, TLO.DAG, Depth + 1);
      if (DemandedSrc) {
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc,
                                        Op.getOperand(1));
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::CONCAT_VECTORS: {
    Known.Zero.setAllBits();
    Known.One.setAllBits();
    EVT SubVT = Op.getOperand(0).getValueType();
    unsigned NumSubVecs = Op.getNumOperands();
    unsigned NumSubElts = SubVT.getVectorNumElements();
    for (unsigned i = 0; i != NumSubVecs; ++i) {
      APInt DemandedSubElts =
          DemandedElts.extractBits(NumSubElts, i * NumSubElts);
      if (SimplifyDemandedBits(Op.getOperand(i), DemandedBits, DemandedSubElts,
                               Known2, TLO, Depth + 1))
        return true;
      // Known bits are shared by every demanded subvector element.
      if (!!DemandedSubElts) {
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
    }
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask();

    // Collect demanded elements from shuffle operands.
    APInt DemandedLHS(NumElts, 0);
    APInt DemandedRHS(NumElts, 0);
    for (unsigned i = 0; i != NumElts; ++i) {
      if (!DemandedElts[i])
        continue;
      int M = ShuffleMask[i];
      if (M < 0) {
        // For UNDEF elements, we don't know anything about the common state of
        // the shuffle result.
        DemandedLHS.clearAllBits();
        DemandedRHS.clearAllBits();
        break;
      }
      assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range");
      if (M < (int)NumElts)
        DemandedLHS.setBit(M);
      else
        DemandedRHS.setBit(M - NumElts);
    }

    if (!!DemandedLHS || !!DemandedRHS) {
      SDValue Op0 = Op.getOperand(0);
      SDValue Op1 = Op.getOperand(1);

      Known.Zero.setAllBits();
      Known.One.setAllBits();
      if (!!DemandedLHS) {
        if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }
      if (!!DemandedRHS) {
        if (SimplifyDemandedBits(Op1, DemandedBits, DemandedRHS, Known2, TLO,
                                 Depth + 1))
          return true;
        Known.One &= Known2.One;
        Known.Zero &= Known2.Zero;
      }

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask);
        return TLO.CombineTo(Op, NewOp);
      }
    }
    break;
  }
  case ISD::AND: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // If the RHS is a constant, check to see if the LHS would be zero without
    // using the bits from the RHS. Below, we use knowledge about the RHS to
    // simplify the LHS, here we're using information from the LHS to simplify
    // the RHS.
    if (ConstantSDNode *RHSC = isConstOrConstSplat(Op1)) {
      // Do not increment Depth here; that can cause an infinite loop.
      KnownBits LHSKnown = TLO.DAG.computeKnownBits(Op0, DemandedElts, Depth);
      // If the LHS already has zeros where RHSC does, this 'and' is dead.
      if ((LHSKnown.Zero & DemandedBits) ==
          (~RHSC->getAPIntValue() & DemandedBits))
        return TLO.CombineTo(Op, Op0);

      // If any of the set bits in the RHS are known zero on the LHS, shrink
      // the constant.
      if (ShrinkDemandedConstant(Op, ~LHSKnown.Zero & DemandedBits,
                                 DemandedElts, TLO))
        return true;

      // Bitwise-not (xor X, -1) is a special case: we don't usually shrink its
      // constant, but if this 'and' is only clearing bits that were just set by
      // the xor, then this 'and' can be eliminated by shrinking the mask of
      // the xor. For example, for a 32-bit X:
      // and (xor (srl X, 31), -1), 1 --> xor (srl X, 31), 1
      if (isBitwiseNot(Op0) && Op0.hasOneUse() &&
          LHSKnown.One == ~RHSC->getAPIntValue()) {
        SDValue Xor = TLO.DAG.getNode(ISD::XOR, dl, VT, Op0.getOperand(0), Op1);
        return TLO.CombineTo(Op, Xor);
      }
    }

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.Zero & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known one on one side, return the other.
    // These bits cannot contribute to the result of the 'and'.
    if (DemandedBits.isSubsetOf(Known2.Zero | Known.One))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.One))
      return TLO.CombineTo(Op, Op1);
    // If all of the demanded bits in the inputs are known zeros, return zero.
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, dl, VT));
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, ~Known2.Zero & DemandedBits, DemandedElts,
                               TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known &= Known2;
    break;
  }
  case ISD::OR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, ~Known.One & DemandedBits, DemandedElts,
                             Known2, TLO, Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'or'.
    if (DemandedBits.isSubsetOf(Known2.One | Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known.One | Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the RHS is a constant, see if we can simplify it.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    Known |= Known2;
    break;
  }
  case ISD::XOR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    if (SimplifyDemandedBits(Op1, DemandedBits, DemandedElts, Known, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    if (SimplifyDemandedBits(Op0, DemandedBits, DemandedElts, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (!DemandedBits.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op0, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op1, DemandedBits, DemandedElts, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        Op0 = DemandedOp0 ? DemandedOp0 : Op0;
        Op1 = DemandedOp1 ? DemandedOp1 : Op1;
        SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1);
        return TLO.CombineTo(Op, NewOp);
      }
    }

    // If all of the demanded bits are known zero on one side, return the other.
    // These bits cannot contribute to the result of the 'xor'.
    if (DemandedBits.isSubsetOf(Known.Zero))
      return TLO.CombineTo(Op, Op0);
    if (DemandedBits.isSubsetOf(Known2.Zero))
      return TLO.CombineTo(Op, Op1);
    // If the operation can be done in a smaller type, do so.
    if (ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
      return true;

    // If all of the unknown bits are known to be zero on one side or the other
    // (but not both) turn this into an *inclusive* or.
    // e.g. (A & C1)^(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
    if (DemandedBits.isSubsetOf(Known.Zero | Known2.Zero))
      return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::OR, dl, VT, Op0, Op1));

    ConstantSDNode *C = isConstOrConstSplat(Op1, DemandedElts);
    if (C) {
      // If one side is a constant, and all of the known set bits on the other
      // side are also set in the constant, turn this into an AND, as we know
      // the bits will be cleared.
      // e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
      // NB: it is okay if more bits are known than are requested
      if (C->getAPIntValue() == Known2.One) {
        SDValue ANDC =
            TLO.DAG.getConstant(~C->getAPIntValue() & DemandedBits, dl, VT);
        return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT, Op0, ANDC));
      }

      // If the RHS is a constant, see if we can change it. Don't alter a -1
      // constant because that's a 'not' op, and that is better for combining
      // and codegen.
      if (!C->isAllOnesValue() &&
          DemandedBits.isSubsetOf(C->getAPIntValue())) {
        // We're flipping all demanded bits. Flip the undemanded bits too.
        SDValue New = TLO.DAG.getNOT(dl, Op0, VT);
        return TLO.CombineTo(Op, New);
      }
    }

    // If we can't turn this into a 'not', try to shrink the constant.
    if (!C || !C->isAllOnesValue())
      if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
        return true;

    Known ^= Known2;
    break;
  }
  case ISD::SELECT:
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(1), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SELECT_CC:
    if (SimplifyDemandedBits(Op.getOperand(3), DemandedBits, Known, TLO,
                             Depth + 1))
      return true;
    if (SimplifyDemandedBits(Op.getOperand(2), DemandedBits, Known2, TLO,
                             Depth + 1))
      return true;
    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    assert(!Known2.hasConflict() && "Bits known to be one AND zero?");

    // If the operands are constants, see if we can simplify them.
    if (ShrinkDemandedConstant(Op, DemandedBits, DemandedElts, TLO))
      return true;

    // Only known if known in both the LHS and RHS.
    Known.One &= Known2.One;
    Known.Zero &= Known2.Zero;
    break;
  case ISD::SETCC: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    // If (1) we only need the sign-bit, (2) the setcc operands are the same
    // width as the setcc result, and (3) the result of a setcc conforms to 0 or
    // -1, we may be able to bypass the setcc.
    if (DemandedBits.isSignMask() &&
        Op0.getScalarValueSizeInBits() == BitWidth &&
        getBooleanContents(Op0.getValueType()) ==
            BooleanContent::ZeroOrNegativeOneBooleanContent) {
      // If we're testing X < 0, then this compare isn't needed - just use X!
      // FIXME: We're limiting to integer types here, but this should also work
      // if we don't care about FP signed-zero. The use of SETLT with FP means
      // that we don't care about NaNs.
      if (CC == ISD::SETLT && Op1.getValueType().isInteger() &&
          (isNullConstant(Op1) || ISD::isBuildVectorAllZeros(Op1.getNode())))
        return TLO.CombineTo(Op, Op0);

      // TODO: Should we check for other forms of sign-bit comparisons?
      // Examples: X <= -1, X >= 0
    }
    if (getBooleanContents(Op0.getValueType()) ==
            TargetLowering::ZeroOrOneBooleanContent &&
        BitWidth > 1)
      Known.Zero.setBitsFrom(1);
    break;
  }
  case ISD::SHL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
      // single shift. We can do this if the bottom bits (which are shifted
      // out) are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SRL) {
        if (!DemandedBits.intersects(APInt::getLowBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SHL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SRL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      // Convert (shl (anyext x, c)) to (anyext (shl x, c)) if the high bits
      // are not demanded. This will likely allow the anyext to be folded away.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::ANY_EXTEND) {
        SDValue InnerOp = Op0.getOperand(0);
        EVT InnerVT = InnerOp.getValueType();
        unsigned InnerBits = InnerVT.getScalarSizeInBits();
        if (ShAmt < InnerBits && DemandedBits.getActiveBits() <= InnerBits &&
            isTypeDesirableForOp(ISD::SHL, InnerVT)) {
          EVT ShTy = getShiftAmountTy(InnerVT, DL);
          if (!APInt(BitWidth, ShAmt).isIntN(ShTy.getSizeInBits()))
            ShTy = InnerVT;
          SDValue NarrowShl =
              TLO.DAG.getNode(ISD::SHL, dl, InnerVT, InnerOp,
                              TLO.DAG.getConstant(ShAmt, dl, ShTy));
          return TLO.CombineTo(
              Op, TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT, NarrowShl));
        }

        // Repeat the SHL optimization above in cases where an extension
        // intervenes: (shl (anyext (shr x, c1)), c2) to
        // (shl (anyext x), c2-c1).
        // This requires that the bottom c1 bits
        // aren't demanded (as above) and that the shifted upper c1 bits of
        // x aren't demanded.
        // TODO - support non-uniform vector amounts.
        if (Op0.hasOneUse() && InnerOp.getOpcode() == ISD::SRL &&
            InnerOp.hasOneUse()) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(InnerOp, DemandedElts)) {
            unsigned InnerShAmt = SA2->getZExtValue();
            if (InnerShAmt < ShAmt && InnerShAmt < InnerBits &&
                DemandedBits.getActiveBits() <=
                    (InnerBits - InnerShAmt + ShAmt) &&
                DemandedBits.countTrailingZeros() >= ShAmt) {
              SDValue NewSA =
                  TLO.DAG.getConstant(ShAmt - InnerShAmt, dl, ShiftVT);
              SDValue NewExt = TLO.DAG.getNode(ISD::ANY_EXTEND, dl, VT,
                                               InnerOp.getOperand(0));
              return TLO.CombineTo(
                  Op, TLO.DAG.getNode(ISD::SHL, dl, VT, NewExt, NewSA));
            }
          }
        }
      }

      APInt InDemandedMask = DemandedBits.lshr(ShAmt);
      if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO,
                               Depth + 1))
        return true;
      assert(!Known.hasConflict() && "Bits known to be one AND zero?");
      Known.Zero <<= ShAmt;
      Known.One <<= ShAmt;
      // low bits known zero.
      Known.Zero.setLowBits(ShAmt);

      // Try shrinking the operation as long as the shift amount will still be
      // in range.
      if ((ShAmt < DemandedBits.getActiveBits()) &&
          ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO))
        return true;
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
    if (const APInt *MaxSA =
            TLO.DAG.getValidMaximumShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = MaxSA->getZExtValue();
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
      unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
      if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= (UpperDemandedBits))
        return TLO.CombineTo(Op, Op0);
    }
    break;
  }
  case ISD::SRL: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);
    EVT ShiftVT = Op1.getValueType();

    if (const APInt *SA =
            TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) {
      unsigned ShAmt = SA->getZExtValue();
      if (ShAmt == 0)
        return TLO.CombineTo(Op, Op0);

      // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
      // single shift. We can do this if the top bits (which are shifted out)
      // are never demanded.
      // TODO - support non-uniform vector amounts.
      if (Op0.getOpcode() == ISD::SHL) {
        if (!DemandedBits.intersects(APInt::getHighBitsSet(BitWidth, ShAmt))) {
          if (const APInt *SA2 =
                  TLO.DAG.getValidShiftAmountConstant(Op0, DemandedElts)) {
            unsigned C1 = SA2->getZExtValue();
            unsigned Opc = ISD::SRL;
            int Diff = ShAmt - C1;
            if (Diff < 0) {
              Diff = -Diff;
              Opc = ISD::SHL;
            }
            SDValue NewSA = TLO.DAG.getConstant(Diff, dl, ShiftVT);
            return TLO.CombineTo(
                Op, TLO.DAG.getNode(Opc, dl, VT, Op0.getOperand(0), NewSA));
          }
        }
      }

      APInt InDemandedMask = (DemandedBits << ShAmt);

      // If the shift is exact, then it does demand the low bits (and knows that
      // they are zero).
      if (Op->getFlags().hasExact())
        InDemandedMask.setLowBits(ShAmt);

      // Compute the new bits that are at the top now.
1578 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1579 Depth + 1)) 1580 return true; 1581 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1582 Known.Zero.lshrInPlace(ShAmt); 1583 Known.One.lshrInPlace(ShAmt); 1584 // High bits known zero. 1585 Known.Zero.setHighBits(ShAmt); 1586 } 1587 break; 1588 } 1589 case ISD::SRA: { 1590 SDValue Op0 = Op.getOperand(0); 1591 SDValue Op1 = Op.getOperand(1); 1592 EVT ShiftVT = Op1.getValueType(); 1593 1594 // If we only want bits that already match the signbit then we don't need 1595 // to shift. 1596 unsigned NumHiDemandedBits = BitWidth - DemandedBits.countTrailingZeros(); 1597 if (TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1) >= 1598 NumHiDemandedBits) 1599 return TLO.CombineTo(Op, Op0); 1600 1601 // If this is an arithmetic shift right and only the low-bit is set, we can 1602 // always convert this into a logical shr, even if the shift amount is 1603 // variable. The low bit of the shift cannot be an input sign bit unless 1604 // the shift amount is >= the size of the datatype, which is undefined. 1605 if (DemandedBits.isOneValue()) 1606 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1)); 1607 1608 if (const APInt *SA = 1609 TLO.DAG.getValidShiftAmountConstant(Op, DemandedElts)) { 1610 unsigned ShAmt = SA->getZExtValue(); 1611 if (ShAmt == 0) 1612 return TLO.CombineTo(Op, Op0); 1613 1614 APInt InDemandedMask = (DemandedBits << ShAmt); 1615 1616 // If the shift is exact, then it does demand the low bits (and knows that 1617 // they are zero). 1618 if (Op->getFlags().hasExact()) 1619 InDemandedMask.setLowBits(ShAmt); 1620 1621 // If any of the demanded bits are produced by the sign extension, we also 1622 // demand the input sign bit. 1623 if (DemandedBits.countLeadingZeros() < ShAmt) 1624 InDemandedMask.setSignBit(); 1625 1626 if (SimplifyDemandedBits(Op0, InDemandedMask, DemandedElts, Known, TLO, 1627 Depth + 1)) 1628 return true; 1629 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1630 Known.Zero.lshrInPlace(ShAmt); 1631 Known.One.lshrInPlace(ShAmt); 1632 1633 // If the input sign bit is known to be zero, or if none of the top bits 1634 // are demanded, turn this into an unsigned shift right. 1635 if (Known.Zero[BitWidth - ShAmt - 1] || 1636 DemandedBits.countLeadingZeros() >= ShAmt) { 1637 SDNodeFlags Flags; 1638 Flags.setExact(Op->getFlags().hasExact()); 1639 return TLO.CombineTo( 1640 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, Op1, Flags)); 1641 } 1642 1643 int Log2 = DemandedBits.exactLogBase2(); 1644 if (Log2 >= 0) { 1645 // The bit must come from the sign. 1646 SDValue NewSA = TLO.DAG.getConstant(BitWidth - 1 - Log2, dl, ShiftVT); 1647 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::SRL, dl, VT, Op0, NewSA)); 1648 } 1649 1650 if (Known.One[BitWidth - ShAmt - 1]) 1651 // New bits are known one. 1652 Known.One.setHighBits(ShAmt); 1653 1654 // Attempt to avoid multi-use ops if we don't need anything from them. 
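  // i.e. if Op0 is also used elsewhere, look for an already-available value that
  // computes the same demanded bits so it can be used here without rewriting
  // Op0's other uses.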
1655 if (!InDemandedMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) {
1656 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
1657 Op0, InDemandedMask, DemandedElts, TLO.DAG, Depth + 1);
1658 if (DemandedOp0) {
1659 SDValue NewOp = TLO.DAG.getNode(ISD::SRA, dl, VT, DemandedOp0, Op1);
1660 return TLO.CombineTo(Op, NewOp);
1661 }
1662 }
1663 }
1664 break;
1665 }
1666 case ISD::FSHL:
1667 case ISD::FSHR: {
1668 SDValue Op0 = Op.getOperand(0);
1669 SDValue Op1 = Op.getOperand(1);
1670 SDValue Op2 = Op.getOperand(2);
1671 bool IsFSHL = (Op.getOpcode() == ISD::FSHL);
1672
1673 if (ConstantSDNode *SA = isConstOrConstSplat(Op2, DemandedElts)) {
1674 unsigned Amt = SA->getAPIntValue().urem(BitWidth);
1675
1676 // For fshl, 0-shift returns the 1st arg.
1677 // For fshr, 0-shift returns the 2nd arg.
1678 if (Amt == 0) {
1679 if (SimplifyDemandedBits(IsFSHL ? Op0 : Op1, DemandedBits, DemandedElts,
1680 Known, TLO, Depth + 1))
1681 return true;
1682 break;
1683 }
1684
1685 // fshl: (Op0 << Amt) | (Op1 >> (BW - Amt))
1686 // fshr: (Op0 << (BW - Amt)) | (Op1 >> Amt)
1687 APInt Demanded0 = DemandedBits.lshr(IsFSHL ? Amt : (BitWidth - Amt));
1688 APInt Demanded1 = DemandedBits << (IsFSHL ? (BitWidth - Amt) : Amt);
1689 if (SimplifyDemandedBits(Op0, Demanded0, DemandedElts, Known2, TLO,
1690 Depth + 1))
1691 return true;
1692 if (SimplifyDemandedBits(Op1, Demanded1, DemandedElts, Known, TLO,
1693 Depth + 1))
1694 return true;
1695
1696 Known2.One <<= (IsFSHL ? Amt : (BitWidth - Amt));
1697 Known2.Zero <<= (IsFSHL ? Amt : (BitWidth - Amt));
1698 Known.One.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1699 Known.Zero.lshrInPlace(IsFSHL ? (BitWidth - Amt) : Amt);
1700 Known.One |= Known2.One;
1701 Known.Zero |= Known2.Zero;
1702 }
1703
1704 // For pow-2 bitwidths we only demand the bottom modulo amt bits.
1705 if (isPowerOf2_32(BitWidth)) {
1706 APInt DemandedAmtBits(Op2.getScalarValueSizeInBits(), BitWidth - 1);
1707 if (SimplifyDemandedBits(Op2, DemandedAmtBits, DemandedElts,
1708 Known2, TLO, Depth + 1))
1709 return true;
1710 }
1711 break;
1712 }
1713 case ISD::ROTL:
1714 case ISD::ROTR: {
1715 SDValue Op0 = Op.getOperand(0);
1716 SDValue Op1 = Op.getOperand(1);
1717
1718 // If we're rotating a 0/-1 value, then it stays a 0/-1 value.
1719 if (BitWidth == TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1))
1720 return TLO.CombineTo(Op, Op0);
1721
1722 // For pow-2 bitwidths we only demand the bottom modulo amt bits.
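  // e.g. for a 32-bit rotate the amount is used modulo 32, so only the low
  // 5 bits of the amount operand (mask BitWidth - 1) can affect the result.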
1723 if (isPowerOf2_32(BitWidth)) { 1724 APInt DemandedAmtBits(Op1.getScalarValueSizeInBits(), BitWidth - 1); 1725 if (SimplifyDemandedBits(Op1, DemandedAmtBits, DemandedElts, Known2, TLO, 1726 Depth + 1)) 1727 return true; 1728 } 1729 break; 1730 } 1731 case ISD::BITREVERSE: { 1732 SDValue Src = Op.getOperand(0); 1733 APInt DemandedSrcBits = DemandedBits.reverseBits(); 1734 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1735 Depth + 1)) 1736 return true; 1737 Known.One = Known2.One.reverseBits(); 1738 Known.Zero = Known2.Zero.reverseBits(); 1739 break; 1740 } 1741 case ISD::BSWAP: { 1742 SDValue Src = Op.getOperand(0); 1743 APInt DemandedSrcBits = DemandedBits.byteSwap(); 1744 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, Known2, TLO, 1745 Depth + 1)) 1746 return true; 1747 Known.One = Known2.One.byteSwap(); 1748 Known.Zero = Known2.Zero.byteSwap(); 1749 break; 1750 } 1751 case ISD::SIGN_EXTEND_INREG: { 1752 SDValue Op0 = Op.getOperand(0); 1753 EVT ExVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 1754 unsigned ExVTBits = ExVT.getScalarSizeInBits(); 1755 1756 // If we only care about the highest bit, don't bother shifting right. 1757 if (DemandedBits.isSignMask()) { 1758 unsigned NumSignBits = 1759 TLO.DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1); 1760 bool AlreadySignExtended = NumSignBits >= BitWidth - ExVTBits + 1; 1761 // However if the input is already sign extended we expect the sign 1762 // extension to be dropped altogether later and do not simplify. 1763 if (!AlreadySignExtended) { 1764 // Compute the correct shift amount type, which must be getShiftAmountTy 1765 // for scalar types after legalization. 1766 EVT ShiftAmtTy = VT; 1767 if (TLO.LegalTypes() && !ShiftAmtTy.isVector()) 1768 ShiftAmtTy = getShiftAmountTy(ShiftAmtTy, DL); 1769 1770 SDValue ShiftAmt = 1771 TLO.DAG.getConstant(BitWidth - ExVTBits, dl, ShiftAmtTy); 1772 return TLO.CombineTo(Op, 1773 TLO.DAG.getNode(ISD::SHL, dl, VT, Op0, ShiftAmt)); 1774 } 1775 } 1776 1777 // If none of the extended bits are demanded, eliminate the sextinreg. 1778 if (DemandedBits.getActiveBits() <= ExVTBits) 1779 return TLO.CombineTo(Op, Op0); 1780 1781 APInt InputDemandedBits = DemandedBits.getLoBits(ExVTBits); 1782 1783 // Since the sign extended bits are demanded, we know that the sign 1784 // bit is demanded. 1785 InputDemandedBits.setBit(ExVTBits - 1); 1786 1787 if (SimplifyDemandedBits(Op0, InputDemandedBits, Known, TLO, Depth + 1)) 1788 return true; 1789 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1790 1791 // If the sign bit of the input is known set or clear, then we know the 1792 // top bits of the result. 1793 1794 // If the input sign bit is known zero, convert this into a zero extension. 
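  // e.g. if bit (ExVTBits - 1) of Op0 is known zero then the replicated sign
  // bits are all zero, which is exactly what zero_extend_inreg produces.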
1795 if (Known.Zero[ExVTBits - 1]) 1796 return TLO.CombineTo(Op, TLO.DAG.getZeroExtendInReg(Op0, dl, ExVT)); 1797 1798 APInt Mask = APInt::getLowBitsSet(BitWidth, ExVTBits); 1799 if (Known.One[ExVTBits - 1]) { // Input sign bit known set 1800 Known.One.setBitsFrom(ExVTBits); 1801 Known.Zero &= Mask; 1802 } else { // Input sign bit unknown 1803 Known.Zero &= Mask; 1804 Known.One &= Mask; 1805 } 1806 break; 1807 } 1808 case ISD::BUILD_PAIR: { 1809 EVT HalfVT = Op.getOperand(0).getValueType(); 1810 unsigned HalfBitWidth = HalfVT.getScalarSizeInBits(); 1811 1812 APInt MaskLo = DemandedBits.getLoBits(HalfBitWidth).trunc(HalfBitWidth); 1813 APInt MaskHi = DemandedBits.getHiBits(HalfBitWidth).trunc(HalfBitWidth); 1814 1815 KnownBits KnownLo, KnownHi; 1816 1817 if (SimplifyDemandedBits(Op.getOperand(0), MaskLo, KnownLo, TLO, Depth + 1)) 1818 return true; 1819 1820 if (SimplifyDemandedBits(Op.getOperand(1), MaskHi, KnownHi, TLO, Depth + 1)) 1821 return true; 1822 1823 Known.Zero = KnownLo.Zero.zext(BitWidth) | 1824 KnownHi.Zero.zext(BitWidth).shl(HalfBitWidth); 1825 1826 Known.One = KnownLo.One.zext(BitWidth) | 1827 KnownHi.One.zext(BitWidth).shl(HalfBitWidth); 1828 break; 1829 } 1830 case ISD::ZERO_EXTEND: 1831 case ISD::ZERO_EXTEND_VECTOR_INREG: { 1832 SDValue Src = Op.getOperand(0); 1833 EVT SrcVT = Src.getValueType(); 1834 unsigned InBits = SrcVT.getScalarSizeInBits(); 1835 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1836 bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG; 1837 1838 // If none of the top bits are demanded, convert this into an any_extend. 1839 if (DemandedBits.getActiveBits() <= InBits) { 1840 // If we only need the non-extended bits of the bottom element 1841 // then we can just bitcast to the result. 1842 if (IsVecInReg && DemandedElts == 1 && 1843 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1844 TLO.DAG.getDataLayout().isLittleEndian()) 1845 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1846 1847 unsigned Opc = 1848 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1849 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1850 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1851 } 1852 1853 APInt InDemandedBits = DemandedBits.trunc(InBits); 1854 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1855 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1856 Depth + 1)) 1857 return true; 1858 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1859 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1860 Known = Known.zext(BitWidth); 1861 1862 // Attempt to avoid multi-use ops if we don't need anything from them. 1863 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1864 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1865 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1866 break; 1867 } 1868 case ISD::SIGN_EXTEND: 1869 case ISD::SIGN_EXTEND_VECTOR_INREG: { 1870 SDValue Src = Op.getOperand(0); 1871 EVT SrcVT = Src.getValueType(); 1872 unsigned InBits = SrcVT.getScalarSizeInBits(); 1873 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1874 bool IsVecInReg = Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG; 1875 1876 // If none of the top bits are demanded, convert this into an any_extend. 1877 if (DemandedBits.getActiveBits() <= InBits) { 1878 // If we only need the non-extended bits of the bottom element 1879 // then we can just bitcast to the result. 
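  // (On little-endian targets element 0 of both VT and SrcVT occupies the
  // lowest bits of the value, so when only the non-extended bits of element 0
  // are demanded a plain bitcast of Src provides them unchanged.)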
1880 if (IsVecInReg && DemandedElts == 1 && 1881 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1882 TLO.DAG.getDataLayout().isLittleEndian()) 1883 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1884 1885 unsigned Opc = 1886 IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND; 1887 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1888 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1889 } 1890 1891 APInt InDemandedBits = DemandedBits.trunc(InBits); 1892 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1893 1894 // Since some of the sign extended bits are demanded, we know that the sign 1895 // bit is demanded. 1896 InDemandedBits.setBit(InBits - 1); 1897 1898 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1899 Depth + 1)) 1900 return true; 1901 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1902 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1903 1904 // If the sign bit is known one, the top bits match. 1905 Known = Known.sext(BitWidth); 1906 1907 // If the sign bit is known zero, convert this to a zero extend. 1908 if (Known.isNonNegative()) { 1909 unsigned Opc = 1910 IsVecInReg ? ISD::ZERO_EXTEND_VECTOR_INREG : ISD::ZERO_EXTEND; 1911 if (!TLO.LegalOperations() || isOperationLegal(Opc, VT)) 1912 return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src)); 1913 } 1914 1915 // Attempt to avoid multi-use ops if we don't need anything from them. 1916 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1917 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1918 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1919 break; 1920 } 1921 case ISD::ANY_EXTEND: 1922 case ISD::ANY_EXTEND_VECTOR_INREG: { 1923 SDValue Src = Op.getOperand(0); 1924 EVT SrcVT = Src.getValueType(); 1925 unsigned InBits = SrcVT.getScalarSizeInBits(); 1926 unsigned InElts = SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1; 1927 bool IsVecInReg = Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG; 1928 1929 // If we only need the bottom element then we can just bitcast. 1930 // TODO: Handle ANY_EXTEND? 1931 if (IsVecInReg && DemandedElts == 1 && 1932 VT.getSizeInBits() == SrcVT.getSizeInBits() && 1933 TLO.DAG.getDataLayout().isLittleEndian()) 1934 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 1935 1936 APInt InDemandedBits = DemandedBits.trunc(InBits); 1937 APInt InDemandedElts = DemandedElts.zextOrSelf(InElts); 1938 if (SimplifyDemandedBits(Src, InDemandedBits, InDemandedElts, Known, TLO, 1939 Depth + 1)) 1940 return true; 1941 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 1942 assert(Known.getBitWidth() == InBits && "Src width has changed?"); 1943 Known = Known.anyext(BitWidth); 1944 1945 // Attempt to avoid multi-use ops if we don't need anything from them. 1946 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1947 Src, InDemandedBits, InDemandedElts, TLO.DAG, Depth + 1)) 1948 return TLO.CombineTo(Op, TLO.DAG.getNode(Op.getOpcode(), dl, VT, NewSrc)); 1949 break; 1950 } 1951 case ISD::TRUNCATE: { 1952 SDValue Src = Op.getOperand(0); 1953 1954 // Simplify the input, using demanded bit information, and compute the known 1955 // zero/one bits live out. 
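  // Only the low BitWidth bits of Src are observed by the truncate, so the
  // demanded mask is simply zero-extended up to the source width.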
1956 unsigned OperandBitWidth = Src.getScalarValueSizeInBits(); 1957 APInt TruncMask = DemandedBits.zext(OperandBitWidth); 1958 if (SimplifyDemandedBits(Src, TruncMask, Known, TLO, Depth + 1)) 1959 return true; 1960 Known = Known.trunc(BitWidth); 1961 1962 // Attempt to avoid multi-use ops if we don't need anything from them. 1963 if (SDValue NewSrc = SimplifyMultipleUseDemandedBits( 1964 Src, TruncMask, DemandedElts, TLO.DAG, Depth + 1)) 1965 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, NewSrc)); 1966 1967 // If the input is only used by this truncate, see if we can shrink it based 1968 // on the known demanded bits. 1969 if (Src.getNode()->hasOneUse()) { 1970 switch (Src.getOpcode()) { 1971 default: 1972 break; 1973 case ISD::SRL: 1974 // Shrink SRL by a constant if none of the high bits shifted in are 1975 // demanded. 1976 if (TLO.LegalTypes() && !isTypeDesirableForOp(ISD::SRL, VT)) 1977 // Do not turn (vt1 truncate (vt2 srl)) into (vt1 srl) if vt1 is 1978 // undesirable. 1979 break; 1980 1981 SDValue ShAmt = Src.getOperand(1); 1982 auto *ShAmtC = dyn_cast<ConstantSDNode>(ShAmt); 1983 if (!ShAmtC || ShAmtC->getAPIntValue().uge(BitWidth)) 1984 break; 1985 uint64_t ShVal = ShAmtC->getZExtValue(); 1986 1987 APInt HighBits = 1988 APInt::getHighBitsSet(OperandBitWidth, OperandBitWidth - BitWidth); 1989 HighBits.lshrInPlace(ShVal); 1990 HighBits = HighBits.trunc(BitWidth); 1991 1992 if (!(HighBits & DemandedBits)) { 1993 // None of the shifted in bits are needed. Add a truncate of the 1994 // shift input, then shift it. 1995 if (TLO.LegalTypes()) 1996 ShAmt = TLO.DAG.getConstant(ShVal, dl, getShiftAmountTy(VT, DL)); 1997 SDValue NewTrunc = 1998 TLO.DAG.getNode(ISD::TRUNCATE, dl, VT, Src.getOperand(0)); 1999 return TLO.CombineTo( 2000 Op, TLO.DAG.getNode(ISD::SRL, dl, VT, NewTrunc, ShAmt)); 2001 } 2002 break; 2003 } 2004 } 2005 2006 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2007 break; 2008 } 2009 case ISD::AssertZext: { 2010 // AssertZext demands all of the high bits, plus any of the low bits 2011 // demanded by its users. 2012 EVT ZVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2013 APInt InMask = APInt::getLowBitsSet(BitWidth, ZVT.getSizeInBits()); 2014 if (SimplifyDemandedBits(Op.getOperand(0), ~InMask | DemandedBits, Known, 2015 TLO, Depth + 1)) 2016 return true; 2017 assert(!Known.hasConflict() && "Bits known to be one AND zero?"); 2018 2019 Known.Zero |= ~InMask; 2020 break; 2021 } 2022 case ISD::EXTRACT_VECTOR_ELT: { 2023 SDValue Src = Op.getOperand(0); 2024 SDValue Idx = Op.getOperand(1); 2025 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2026 unsigned EltBitWidth = Src.getScalarValueSizeInBits(); 2027 2028 // Demand the bits from every vector element without a constant index. 2029 APInt DemandedSrcElts = APInt::getAllOnesValue(NumSrcElts); 2030 if (auto *CIdx = dyn_cast<ConstantSDNode>(Idx)) 2031 if (CIdx->getAPIntValue().ult(NumSrcElts)) 2032 DemandedSrcElts = APInt::getOneBitSet(NumSrcElts, CIdx->getZExtValue()); 2033 2034 // If BitWidth > EltBitWidth the value is anyext:ed. So we do not know 2035 // anything about the extended bits. 2036 APInt DemandedSrcBits = DemandedBits; 2037 if (BitWidth > EltBitWidth) 2038 DemandedSrcBits = DemandedSrcBits.trunc(EltBitWidth); 2039 2040 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, Known2, TLO, 2041 Depth + 1)) 2042 return true; 2043 2044 // Attempt to avoid multi-use ops if we don't need anything from them. 
2045 if (!DemandedSrcBits.isAllOnesValue() || 2046 !DemandedSrcElts.isAllOnesValue()) { 2047 if (SDValue DemandedSrc = SimplifyMultipleUseDemandedBits( 2048 Src, DemandedSrcBits, DemandedSrcElts, TLO.DAG, Depth + 1)) { 2049 SDValue NewOp = 2050 TLO.DAG.getNode(Op.getOpcode(), dl, VT, DemandedSrc, Idx); 2051 return TLO.CombineTo(Op, NewOp); 2052 } 2053 } 2054 2055 Known = Known2; 2056 if (BitWidth > EltBitWidth) 2057 Known = Known.anyext(BitWidth); 2058 break; 2059 } 2060 case ISD::BITCAST: { 2061 SDValue Src = Op.getOperand(0); 2062 EVT SrcVT = Src.getValueType(); 2063 unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits(); 2064 2065 // If this is an FP->Int bitcast and if the sign bit is the only 2066 // thing demanded, turn this into a FGETSIGN. 2067 if (!TLO.LegalOperations() && !VT.isVector() && !SrcVT.isVector() && 2068 DemandedBits == APInt::getSignMask(Op.getValueSizeInBits()) && 2069 SrcVT.isFloatingPoint()) { 2070 bool OpVTLegal = isOperationLegalOrCustom(ISD::FGETSIGN, VT); 2071 bool i32Legal = isOperationLegalOrCustom(ISD::FGETSIGN, MVT::i32); 2072 if ((OpVTLegal || i32Legal) && VT.isSimple() && SrcVT != MVT::f16 && 2073 SrcVT != MVT::f128) { 2074 // Cannot eliminate/lower SHL for f128 yet. 2075 EVT Ty = OpVTLegal ? VT : MVT::i32; 2076 // Make a FGETSIGN + SHL to move the sign bit into the appropriate 2077 // place. We expect the SHL to be eliminated by other optimizations. 2078 SDValue Sign = TLO.DAG.getNode(ISD::FGETSIGN, dl, Ty, Src); 2079 unsigned OpVTSizeInBits = Op.getValueSizeInBits(); 2080 if (!OpVTLegal && OpVTSizeInBits > 32) 2081 Sign = TLO.DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Sign); 2082 unsigned ShVal = Op.getValueSizeInBits() - 1; 2083 SDValue ShAmt = TLO.DAG.getConstant(ShVal, dl, VT); 2084 return TLO.CombineTo(Op, 2085 TLO.DAG.getNode(ISD::SHL, dl, VT, Sign, ShAmt)); 2086 } 2087 } 2088 2089 // Bitcast from a vector using SimplifyDemanded Bits/VectorElts. 2090 // Demand the elt/bit if any of the original elts/bits are demanded. 2091 // TODO - bigendian once we have test coverage. 2092 if (SrcVT.isVector() && (BitWidth % NumSrcEltBits) == 0 && 2093 TLO.DAG.getDataLayout().isLittleEndian()) { 2094 unsigned Scale = BitWidth / NumSrcEltBits; 2095 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2096 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2097 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2098 for (unsigned i = 0; i != Scale; ++i) { 2099 unsigned Offset = i * NumSrcEltBits; 2100 APInt Sub = DemandedBits.extractBits(NumSrcEltBits, Offset); 2101 if (!Sub.isNullValue()) { 2102 DemandedSrcBits |= Sub; 2103 for (unsigned j = 0; j != NumElts; ++j) 2104 if (DemandedElts[j]) 2105 DemandedSrcElts.setBit((j * Scale) + i); 2106 } 2107 } 2108 2109 APInt KnownSrcUndef, KnownSrcZero; 2110 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2111 KnownSrcZero, TLO, Depth + 1)) 2112 return true; 2113 2114 KnownBits KnownSrcBits; 2115 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2116 KnownSrcBits, TLO, Depth + 1)) 2117 return true; 2118 } else if ((NumSrcEltBits % BitWidth) == 0 && 2119 TLO.DAG.getDataLayout().isLittleEndian()) { 2120 unsigned Scale = NumSrcEltBits / BitWidth; 2121 unsigned NumSrcElts = SrcVT.isVector() ? 
SrcVT.getVectorNumElements() : 1; 2122 APInt DemandedSrcBits = APInt::getNullValue(NumSrcEltBits); 2123 APInt DemandedSrcElts = APInt::getNullValue(NumSrcElts); 2124 for (unsigned i = 0; i != NumElts; ++i) 2125 if (DemandedElts[i]) { 2126 unsigned Offset = (i % Scale) * BitWidth; 2127 DemandedSrcBits.insertBits(DemandedBits, Offset); 2128 DemandedSrcElts.setBit(i / Scale); 2129 } 2130 2131 if (SrcVT.isVector()) { 2132 APInt KnownSrcUndef, KnownSrcZero; 2133 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownSrcUndef, 2134 KnownSrcZero, TLO, Depth + 1)) 2135 return true; 2136 } 2137 2138 KnownBits KnownSrcBits; 2139 if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedSrcElts, 2140 KnownSrcBits, TLO, Depth + 1)) 2141 return true; 2142 } 2143 2144 // If this is a bitcast, let computeKnownBits handle it. Only do this on a 2145 // recursive call where Known may be useful to the caller. 2146 if (Depth > 0) { 2147 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2148 return false; 2149 } 2150 break; 2151 } 2152 case ISD::ADD: 2153 case ISD::MUL: 2154 case ISD::SUB: { 2155 // Add, Sub, and Mul don't demand any bits in positions beyond that 2156 // of the highest bit demanded of them. 2157 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1); 2158 SDNodeFlags Flags = Op.getNode()->getFlags(); 2159 unsigned DemandedBitsLZ = DemandedBits.countLeadingZeros(); 2160 APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ); 2161 if (SimplifyDemandedBits(Op0, LoMask, DemandedElts, Known2, TLO, 2162 Depth + 1) || 2163 SimplifyDemandedBits(Op1, LoMask, DemandedElts, Known2, TLO, 2164 Depth + 1) || 2165 // See if the operation should be performed at a smaller bit width. 2166 ShrinkDemandedOp(Op, BitWidth, DemandedBits, TLO)) { 2167 if (Flags.hasNoSignedWrap() || Flags.hasNoUnsignedWrap()) { 2168 // Disable the nsw and nuw flags. We can no longer guarantee that we 2169 // won't wrap after simplification. 2170 Flags.setNoSignedWrap(false); 2171 Flags.setNoUnsignedWrap(false); 2172 SDValue NewOp = 2173 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2174 return TLO.CombineTo(Op, NewOp); 2175 } 2176 return true; 2177 } 2178 2179 // Attempt to avoid multi-use ops if we don't need anything from them. 2180 if (!LoMask.isAllOnesValue() || !DemandedElts.isAllOnesValue()) { 2181 SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits( 2182 Op0, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2183 SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits( 2184 Op1, LoMask, DemandedElts, TLO.DAG, Depth + 1); 2185 if (DemandedOp0 || DemandedOp1) { 2186 Flags.setNoSignedWrap(false); 2187 Flags.setNoUnsignedWrap(false); 2188 Op0 = DemandedOp0 ? DemandedOp0 : Op0; 2189 Op1 = DemandedOp1 ? DemandedOp1 : Op1; 2190 SDValue NewOp = 2191 TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Op1, Flags); 2192 return TLO.CombineTo(Op, NewOp); 2193 } 2194 } 2195 2196 // If we have a constant operand, we may be able to turn it into -1 if we 2197 // do not demand the high bits. This can make the constant smaller to 2198 // encode, allow more general folding, or match specialized instruction 2199 // patterns (eg, 'blsr' on x86). Don't bother changing 1 to -1 because that 2200 // is probably not useful (and could be detrimental). 
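  // e.g. if only the low 8 bits of an i32 (add X, 255) are demanded, the 255
  // can become -1: both constants agree in the demanded low bits, and
  // carries only propagate towards the high bits.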
2201 ConstantSDNode *C = isConstOrConstSplat(Op1); 2202 APInt HighMask = APInt::getHighBitsSet(BitWidth, DemandedBitsLZ); 2203 if (C && !C->isAllOnesValue() && !C->isOne() && 2204 (C->getAPIntValue() | HighMask).isAllOnesValue()) { 2205 SDValue Neg1 = TLO.DAG.getAllOnesConstant(dl, VT); 2206 // Disable the nsw and nuw flags. We can no longer guarantee that we 2207 // won't wrap after simplification. 2208 Flags.setNoSignedWrap(false); 2209 Flags.setNoUnsignedWrap(false); 2210 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), dl, VT, Op0, Neg1, Flags); 2211 return TLO.CombineTo(Op, NewOp); 2212 } 2213 2214 LLVM_FALLTHROUGH; 2215 } 2216 default: 2217 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2218 if (SimplifyDemandedBitsForTargetNode(Op, DemandedBits, DemandedElts, 2219 Known, TLO, Depth)) 2220 return true; 2221 break; 2222 } 2223 2224 // Just use computeKnownBits to compute output bits. 2225 Known = TLO.DAG.computeKnownBits(Op, DemandedElts, Depth); 2226 break; 2227 } 2228 2229 // If we know the value of all of the demanded bits, return this as a 2230 // constant. 2231 if (DemandedBits.isSubsetOf(Known.Zero | Known.One)) { 2232 // Avoid folding to a constant if any OpaqueConstant is involved. 2233 const SDNode *N = Op.getNode(); 2234 for (SDNodeIterator I = SDNodeIterator::begin(N), 2235 E = SDNodeIterator::end(N); 2236 I != E; ++I) { 2237 SDNode *Op = *I; 2238 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) 2239 if (C->isOpaque()) 2240 return false; 2241 } 2242 // TODO: Handle float bits as well. 2243 if (VT.isInteger()) 2244 return TLO.CombineTo(Op, TLO.DAG.getConstant(Known.One, dl, VT)); 2245 } 2246 2247 return false; 2248 } 2249 2250 bool TargetLowering::SimplifyDemandedVectorElts(SDValue Op, 2251 const APInt &DemandedElts, 2252 APInt &KnownUndef, 2253 APInt &KnownZero, 2254 DAGCombinerInfo &DCI) const { 2255 SelectionDAG &DAG = DCI.DAG; 2256 TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2257 !DCI.isBeforeLegalizeOps()); 2258 2259 bool Simplified = 2260 SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero, TLO); 2261 if (Simplified) { 2262 DCI.AddToWorklist(Op.getNode()); 2263 DCI.CommitTargetLoweringOpt(TLO); 2264 } 2265 2266 return Simplified; 2267 } 2268 2269 /// Given a vector binary operation and known undefined elements for each input 2270 /// operand, compute whether each element of the output is undefined. 2271 static APInt getKnownUndefForVectorBinop(SDValue BO, SelectionDAG &DAG, 2272 const APInt &UndefOp0, 2273 const APInt &UndefOp1) { 2274 EVT VT = BO.getValueType(); 2275 assert(DAG.getTargetLoweringInfo().isBinOp(BO.getOpcode()) && VT.isVector() && 2276 "Vector binop only"); 2277 2278 EVT EltVT = VT.getVectorElementType(); 2279 unsigned NumElts = VT.getVectorNumElements(); 2280 assert(UndefOp0.getBitWidth() == NumElts && 2281 UndefOp1.getBitWidth() == NumElts && "Bad type for undef analysis"); 2282 2283 auto getUndefOrConstantElt = [&](SDValue V, unsigned Index, 2284 const APInt &UndefVals) { 2285 if (UndefVals[Index]) 2286 return DAG.getUNDEF(EltVT); 2287 2288 if (auto *BV = dyn_cast<BuildVectorSDNode>(V)) { 2289 // Try hard to make sure that the getNode() call is not creating temporary 2290 // nodes. Ignore opaque integers because they do not constant fold. 
2291 SDValue Elt = BV->getOperand(Index); 2292 auto *C = dyn_cast<ConstantSDNode>(Elt); 2293 if (isa<ConstantFPSDNode>(Elt) || Elt.isUndef() || (C && !C->isOpaque())) 2294 return Elt; 2295 } 2296 2297 return SDValue(); 2298 }; 2299 2300 APInt KnownUndef = APInt::getNullValue(NumElts); 2301 for (unsigned i = 0; i != NumElts; ++i) { 2302 // If both inputs for this element are either constant or undef and match 2303 // the element type, compute the constant/undef result for this element of 2304 // the vector. 2305 // TODO: Ideally we would use FoldConstantArithmetic() here, but that does 2306 // not handle FP constants. The code within getNode() should be refactored 2307 // to avoid the danger of creating a bogus temporary node here. 2308 SDValue C0 = getUndefOrConstantElt(BO.getOperand(0), i, UndefOp0); 2309 SDValue C1 = getUndefOrConstantElt(BO.getOperand(1), i, UndefOp1); 2310 if (C0 && C1 && C0.getValueType() == EltVT && C1.getValueType() == EltVT) 2311 if (DAG.getNode(BO.getOpcode(), SDLoc(BO), EltVT, C0, C1).isUndef()) 2312 KnownUndef.setBit(i); 2313 } 2314 return KnownUndef; 2315 } 2316 2317 bool TargetLowering::SimplifyDemandedVectorElts( 2318 SDValue Op, const APInt &OriginalDemandedElts, APInt &KnownUndef, 2319 APInt &KnownZero, TargetLoweringOpt &TLO, unsigned Depth, 2320 bool AssumeSingleUse) const { 2321 EVT VT = Op.getValueType(); 2322 unsigned Opcode = Op.getOpcode(); 2323 APInt DemandedElts = OriginalDemandedElts; 2324 unsigned NumElts = DemandedElts.getBitWidth(); 2325 assert(VT.isVector() && "Expected vector op"); 2326 2327 KnownUndef = KnownZero = APInt::getNullValue(NumElts); 2328 2329 // TODO: For now we assume we know nothing about scalable vectors. 2330 if (VT.isScalableVector()) 2331 return false; 2332 2333 assert(VT.getVectorNumElements() == NumElts && 2334 "Mask size mismatches value type element count!"); 2335 2336 // Undef operand. 2337 if (Op.isUndef()) { 2338 KnownUndef.setAllBits(); 2339 return false; 2340 } 2341 2342 // If Op has other users, assume that all elements are needed. 2343 if (!Op.getNode()->hasOneUse() && !AssumeSingleUse) 2344 DemandedElts.setAllBits(); 2345 2346 // Not demanding any elements from Op. 2347 if (DemandedElts == 0) { 2348 KnownUndef.setAllBits(); 2349 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2350 } 2351 2352 // Limit search depth. 2353 if (Depth >= SelectionDAG::MaxRecursionDepth) 2354 return false; 2355 2356 SDLoc DL(Op); 2357 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 2358 2359 // Helper for demanding the specified elements and all the bits of both binary 2360 // operands. 2361 auto SimplifyDemandedVectorEltsBinOp = [&](SDValue Op0, SDValue Op1) { 2362 SDValue NewOp0 = SimplifyMultipleUseDemandedVectorElts(Op0, DemandedElts, 2363 TLO.DAG, Depth + 1); 2364 SDValue NewOp1 = SimplifyMultipleUseDemandedVectorElts(Op1, DemandedElts, 2365 TLO.DAG, Depth + 1); 2366 if (NewOp0 || NewOp1) { 2367 SDValue NewOp = TLO.DAG.getNode( 2368 Opcode, SDLoc(Op), VT, NewOp0 ? NewOp0 : Op0, NewOp1 ? NewOp1 : Op1); 2369 return TLO.CombineTo(Op, NewOp); 2370 } 2371 return false; 2372 }; 2373 2374 switch (Opcode) { 2375 case ISD::SCALAR_TO_VECTOR: { 2376 if (!DemandedElts[0]) { 2377 KnownUndef.setAllBits(); 2378 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2379 } 2380 KnownUndef.setHighBits(NumElts - 1); 2381 break; 2382 } 2383 case ISD::BITCAST: { 2384 SDValue Src = Op.getOperand(0); 2385 EVT SrcVT = Src.getValueType(); 2386 2387 // We only handle vectors here. 2388 // TODO - investigate calling SimplifyDemandedBits/ComputeKnownBits? 
2389 if (!SrcVT.isVector()) 2390 break; 2391 2392 // Fast handling of 'identity' bitcasts. 2393 unsigned NumSrcElts = SrcVT.getVectorNumElements(); 2394 if (NumSrcElts == NumElts) 2395 return SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, 2396 KnownZero, TLO, Depth + 1); 2397 2398 APInt SrcZero, SrcUndef; 2399 APInt SrcDemandedElts = APInt::getNullValue(NumSrcElts); 2400 2401 // Bitcast from 'large element' src vector to 'small element' vector, we 2402 // must demand a source element if any DemandedElt maps to it. 2403 if ((NumElts % NumSrcElts) == 0) { 2404 unsigned Scale = NumElts / NumSrcElts; 2405 for (unsigned i = 0; i != NumElts; ++i) 2406 if (DemandedElts[i]) 2407 SrcDemandedElts.setBit(i / Scale); 2408 2409 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2410 TLO, Depth + 1)) 2411 return true; 2412 2413 // Try calling SimplifyDemandedBits, converting demanded elts to the bits 2414 // of the large element. 2415 // TODO - bigendian once we have test coverage. 2416 if (TLO.DAG.getDataLayout().isLittleEndian()) { 2417 unsigned SrcEltSizeInBits = SrcVT.getScalarSizeInBits(); 2418 APInt SrcDemandedBits = APInt::getNullValue(SrcEltSizeInBits); 2419 for (unsigned i = 0; i != NumElts; ++i) 2420 if (DemandedElts[i]) { 2421 unsigned Ofs = (i % Scale) * EltSizeInBits; 2422 SrcDemandedBits.setBits(Ofs, Ofs + EltSizeInBits); 2423 } 2424 2425 KnownBits Known; 2426 if (SimplifyDemandedBits(Src, SrcDemandedBits, SrcDemandedElts, Known, 2427 TLO, Depth + 1)) 2428 return true; 2429 } 2430 2431 // If the src element is zero/undef then all the output elements will be - 2432 // only demanded elements are guaranteed to be correct. 2433 for (unsigned i = 0; i != NumSrcElts; ++i) { 2434 if (SrcDemandedElts[i]) { 2435 if (SrcZero[i]) 2436 KnownZero.setBits(i * Scale, (i + 1) * Scale); 2437 if (SrcUndef[i]) 2438 KnownUndef.setBits(i * Scale, (i + 1) * Scale); 2439 } 2440 } 2441 } 2442 2443 // Bitcast from 'small element' src vector to 'large element' vector, we 2444 // demand all smaller source elements covered by the larger demanded element 2445 // of this vector. 2446 if ((NumSrcElts % NumElts) == 0) { 2447 unsigned Scale = NumSrcElts / NumElts; 2448 for (unsigned i = 0; i != NumElts; ++i) 2449 if (DemandedElts[i]) 2450 SrcDemandedElts.setBits(i * Scale, (i + 1) * Scale); 2451 2452 if (SimplifyDemandedVectorElts(Src, SrcDemandedElts, SrcUndef, SrcZero, 2453 TLO, Depth + 1)) 2454 return true; 2455 2456 // If all the src elements covering an output element are zero/undef, then 2457 // the output element will be as well, assuming it was demanded. 2458 for (unsigned i = 0; i != NumElts; ++i) { 2459 if (DemandedElts[i]) { 2460 if (SrcZero.extractBits(Scale, i * Scale).isAllOnesValue()) 2461 KnownZero.setBit(i); 2462 if (SrcUndef.extractBits(Scale, i * Scale).isAllOnesValue()) 2463 KnownUndef.setBit(i); 2464 } 2465 } 2466 } 2467 break; 2468 } 2469 case ISD::BUILD_VECTOR: { 2470 // Check all elements and simplify any unused elements with UNDEF. 2471 if (!DemandedElts.isAllOnesValue()) { 2472 // Don't simplify BROADCASTS. 
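  // (A BUILD_VECTOR whose operands are all the same value is effectively a
  // splat; the check below only rewrites nodes where at least one operand
  // differs, presumably so splat/broadcast patterns are left intact.)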
2473 if (llvm::any_of(Op->op_values(), 2474 [&](SDValue Elt) { return Op.getOperand(0) != Elt; })) { 2475 SmallVector<SDValue, 32> Ops(Op->op_begin(), Op->op_end()); 2476 bool Updated = false; 2477 for (unsigned i = 0; i != NumElts; ++i) { 2478 if (!DemandedElts[i] && !Ops[i].isUndef()) { 2479 Ops[i] = TLO.DAG.getUNDEF(Ops[0].getValueType()); 2480 KnownUndef.setBit(i); 2481 Updated = true; 2482 } 2483 } 2484 if (Updated) 2485 return TLO.CombineTo(Op, TLO.DAG.getBuildVector(VT, DL, Ops)); 2486 } 2487 } 2488 for (unsigned i = 0; i != NumElts; ++i) { 2489 SDValue SrcOp = Op.getOperand(i); 2490 if (SrcOp.isUndef()) { 2491 KnownUndef.setBit(i); 2492 } else if (EltSizeInBits == SrcOp.getScalarValueSizeInBits() && 2493 (isNullConstant(SrcOp) || isNullFPConstant(SrcOp))) { 2494 KnownZero.setBit(i); 2495 } 2496 } 2497 break; 2498 } 2499 case ISD::CONCAT_VECTORS: { 2500 EVT SubVT = Op.getOperand(0).getValueType(); 2501 unsigned NumSubVecs = Op.getNumOperands(); 2502 unsigned NumSubElts = SubVT.getVectorNumElements(); 2503 for (unsigned i = 0; i != NumSubVecs; ++i) { 2504 SDValue SubOp = Op.getOperand(i); 2505 APInt SubElts = DemandedElts.extractBits(NumSubElts, i * NumSubElts); 2506 APInt SubUndef, SubZero; 2507 if (SimplifyDemandedVectorElts(SubOp, SubElts, SubUndef, SubZero, TLO, 2508 Depth + 1)) 2509 return true; 2510 KnownUndef.insertBits(SubUndef, i * NumSubElts); 2511 KnownZero.insertBits(SubZero, i * NumSubElts); 2512 } 2513 break; 2514 } 2515 case ISD::INSERT_SUBVECTOR: { 2516 // Demand any elements from the subvector and the remainder from the src its 2517 // inserted into. 2518 SDValue Src = Op.getOperand(0); 2519 SDValue Sub = Op.getOperand(1); 2520 uint64_t Idx = Op.getConstantOperandVal(2); 2521 unsigned NumSubElts = Sub.getValueType().getVectorNumElements(); 2522 APInt DemandedSubElts = DemandedElts.extractBits(NumSubElts, Idx); 2523 APInt DemandedSrcElts = DemandedElts; 2524 DemandedSrcElts.insertBits(APInt::getNullValue(NumSubElts), Idx); 2525 2526 APInt SubUndef, SubZero; 2527 if (SimplifyDemandedVectorElts(Sub, DemandedSubElts, SubUndef, SubZero, TLO, 2528 Depth + 1)) 2529 return true; 2530 2531 // If none of the src operand elements are demanded, replace it with undef. 2532 if (!DemandedSrcElts && !Src.isUndef()) 2533 return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, 2534 TLO.DAG.getUNDEF(VT), Sub, 2535 Op.getOperand(2))); 2536 2537 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, KnownUndef, KnownZero, 2538 TLO, Depth + 1)) 2539 return true; 2540 KnownUndef.insertBits(SubUndef, Idx); 2541 KnownZero.insertBits(SubZero, Idx); 2542 2543 // Attempt to avoid multi-use ops if we don't need anything from them. 2544 if (!DemandedSrcElts.isAllOnesValue() || 2545 !DemandedSubElts.isAllOnesValue()) { 2546 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2547 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2548 SDValue NewSub = SimplifyMultipleUseDemandedVectorElts( 2549 Sub, DemandedSubElts, TLO.DAG, Depth + 1); 2550 if (NewSrc || NewSub) { 2551 NewSrc = NewSrc ? NewSrc : Src; 2552 NewSub = NewSub ? NewSub : Sub; 2553 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2554 NewSub, Op.getOperand(2)); 2555 return TLO.CombineTo(Op, NewOp); 2556 } 2557 } 2558 break; 2559 } 2560 case ISD::EXTRACT_SUBVECTOR: { 2561 // Offset the demanded elts by the subvector index. 
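  // e.g. extracting a v4i32 subvector at index 4 from a v8i32 source:
  // demanded result elements [3:0] correspond to source elements [7:4],
  // hence the demanded mask is shifted left by Idx.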
2562 SDValue Src = Op.getOperand(0); 2563 if (Src.getValueType().isScalableVector()) 2564 break; 2565 uint64_t Idx = Op.getConstantOperandVal(1); 2566 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2567 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx); 2568 2569 APInt SrcUndef, SrcZero; 2570 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2571 Depth + 1)) 2572 return true; 2573 KnownUndef = SrcUndef.extractBits(NumElts, Idx); 2574 KnownZero = SrcZero.extractBits(NumElts, Idx); 2575 2576 // Attempt to avoid multi-use ops if we don't need anything from them. 2577 if (!DemandedElts.isAllOnesValue()) { 2578 SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts( 2579 Src, DemandedSrcElts, TLO.DAG, Depth + 1); 2580 if (NewSrc) { 2581 SDValue NewOp = TLO.DAG.getNode(Op.getOpcode(), SDLoc(Op), VT, NewSrc, 2582 Op.getOperand(1)); 2583 return TLO.CombineTo(Op, NewOp); 2584 } 2585 } 2586 break; 2587 } 2588 case ISD::INSERT_VECTOR_ELT: { 2589 SDValue Vec = Op.getOperand(0); 2590 SDValue Scl = Op.getOperand(1); 2591 auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2592 2593 // For a legal, constant insertion index, if we don't need this insertion 2594 // then strip it, else remove it from the demanded elts. 2595 if (CIdx && CIdx->getAPIntValue().ult(NumElts)) { 2596 unsigned Idx = CIdx->getZExtValue(); 2597 if (!DemandedElts[Idx]) 2598 return TLO.CombineTo(Op, Vec); 2599 2600 APInt DemandedVecElts(DemandedElts); 2601 DemandedVecElts.clearBit(Idx); 2602 if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef, 2603 KnownZero, TLO, Depth + 1)) 2604 return true; 2605 2606 KnownUndef.clearBit(Idx); 2607 if (Scl.isUndef()) 2608 KnownUndef.setBit(Idx); 2609 2610 KnownZero.clearBit(Idx); 2611 if (isNullConstant(Scl) || isNullFPConstant(Scl)) 2612 KnownZero.setBit(Idx); 2613 break; 2614 } 2615 2616 APInt VecUndef, VecZero; 2617 if (SimplifyDemandedVectorElts(Vec, DemandedElts, VecUndef, VecZero, TLO, 2618 Depth + 1)) 2619 return true; 2620 // Without knowing the insertion index we can't set KnownUndef/KnownZero. 2621 break; 2622 } 2623 case ISD::VSELECT: { 2624 // Try to transform the select condition based on the current demanded 2625 // elements. 2626 // TODO: If a condition element is undef, we can choose from one arm of the 2627 // select (and if one arm is undef, then we can propagate that to the 2628 // result). 2629 // TODO - add support for constant vselect masks (see IR version of this). 2630 APInt UnusedUndef, UnusedZero; 2631 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, UnusedUndef, 2632 UnusedZero, TLO, Depth + 1)) 2633 return true; 2634 2635 // See if we can simplify either vselect operand. 2636 APInt DemandedLHS(DemandedElts); 2637 APInt DemandedRHS(DemandedElts); 2638 APInt UndefLHS, ZeroLHS; 2639 APInt UndefRHS, ZeroRHS; 2640 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedLHS, UndefLHS, 2641 ZeroLHS, TLO, Depth + 1)) 2642 return true; 2643 if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedRHS, UndefRHS, 2644 ZeroRHS, TLO, Depth + 1)) 2645 return true; 2646 2647 KnownUndef = UndefLHS & UndefRHS; 2648 KnownZero = ZeroLHS & ZeroRHS; 2649 break; 2650 } 2651 case ISD::VECTOR_SHUFFLE: { 2652 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(Op)->getMask(); 2653 2654 // Collect demanded elements from shuffle operands.. 
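  // Mask entries in [0, NumElts) select from the first operand, entries in
  // [NumElts, 2*NumElts) select from the second, and negative entries are
  // undef lanes.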
2655 APInt DemandedLHS(NumElts, 0); 2656 APInt DemandedRHS(NumElts, 0); 2657 for (unsigned i = 0; i != NumElts; ++i) { 2658 int M = ShuffleMask[i]; 2659 if (M < 0 || !DemandedElts[i]) 2660 continue; 2661 assert(0 <= M && M < (int)(2 * NumElts) && "Shuffle index out of range"); 2662 if (M < (int)NumElts) 2663 DemandedLHS.setBit(M); 2664 else 2665 DemandedRHS.setBit(M - NumElts); 2666 } 2667 2668 // See if we can simplify either shuffle operand. 2669 APInt UndefLHS, ZeroLHS; 2670 APInt UndefRHS, ZeroRHS; 2671 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, UndefLHS, 2672 ZeroLHS, TLO, Depth + 1)) 2673 return true; 2674 if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, UndefRHS, 2675 ZeroRHS, TLO, Depth + 1)) 2676 return true; 2677 2678 // Simplify mask using undef elements from LHS/RHS. 2679 bool Updated = false; 2680 bool IdentityLHS = true, IdentityRHS = true; 2681 SmallVector<int, 32> NewMask(ShuffleMask.begin(), ShuffleMask.end()); 2682 for (unsigned i = 0; i != NumElts; ++i) { 2683 int &M = NewMask[i]; 2684 if (M < 0) 2685 continue; 2686 if (!DemandedElts[i] || (M < (int)NumElts && UndefLHS[M]) || 2687 (M >= (int)NumElts && UndefRHS[M - NumElts])) { 2688 Updated = true; 2689 M = -1; 2690 } 2691 IdentityLHS &= (M < 0) || (M == (int)i); 2692 IdentityRHS &= (M < 0) || ((M - NumElts) == i); 2693 } 2694 2695 // Update legal shuffle masks based on demanded elements if it won't reduce 2696 // to Identity which can cause premature removal of the shuffle mask. 2697 if (Updated && !IdentityLHS && !IdentityRHS && !TLO.LegalOps) { 2698 SDValue LegalShuffle = 2699 buildLegalVectorShuffle(VT, DL, Op.getOperand(0), Op.getOperand(1), 2700 NewMask, TLO.DAG); 2701 if (LegalShuffle) 2702 return TLO.CombineTo(Op, LegalShuffle); 2703 } 2704 2705 // Propagate undef/zero elements from LHS/RHS. 2706 for (unsigned i = 0; i != NumElts; ++i) { 2707 int M = ShuffleMask[i]; 2708 if (M < 0) { 2709 KnownUndef.setBit(i); 2710 } else if (M < (int)NumElts) { 2711 if (UndefLHS[M]) 2712 KnownUndef.setBit(i); 2713 if (ZeroLHS[M]) 2714 KnownZero.setBit(i); 2715 } else { 2716 if (UndefRHS[M - NumElts]) 2717 KnownUndef.setBit(i); 2718 if (ZeroRHS[M - NumElts]) 2719 KnownZero.setBit(i); 2720 } 2721 } 2722 break; 2723 } 2724 case ISD::ANY_EXTEND_VECTOR_INREG: 2725 case ISD::SIGN_EXTEND_VECTOR_INREG: 2726 case ISD::ZERO_EXTEND_VECTOR_INREG: { 2727 APInt SrcUndef, SrcZero; 2728 SDValue Src = Op.getOperand(0); 2729 unsigned NumSrcElts = Src.getValueType().getVectorNumElements(); 2730 APInt DemandedSrcElts = DemandedElts.zextOrSelf(NumSrcElts); 2731 if (SimplifyDemandedVectorElts(Src, DemandedSrcElts, SrcUndef, SrcZero, TLO, 2732 Depth + 1)) 2733 return true; 2734 KnownZero = SrcZero.zextOrTrunc(NumElts); 2735 KnownUndef = SrcUndef.zextOrTrunc(NumElts); 2736 2737 if (Op.getOpcode() == ISD::ANY_EXTEND_VECTOR_INREG && 2738 Op.getValueSizeInBits() == Src.getValueSizeInBits() && 2739 DemandedSrcElts == 1 && TLO.DAG.getDataLayout().isLittleEndian()) { 2740 // aext - if we just need the bottom element then we can bitcast. 2741 return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src)); 2742 } 2743 2744 if (Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) { 2745 // zext(undef) upper bits are guaranteed to be zero. 2746 if (DemandedElts.isSubsetOf(KnownUndef)) 2747 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2748 KnownUndef.clearAllBits(); 2749 } 2750 break; 2751 } 2752 2753 // TODO: There are more binop opcodes that could be handled here - MIN, 2754 // MAX, saturated math, etc. 
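  // These are all lane-wise operations: result element i depends only on
  // element i of each operand, so both operands are simplified against the
  // same DemandedElts.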
2755 case ISD::OR: 2756 case ISD::XOR: 2757 case ISD::ADD: 2758 case ISD::SUB: 2759 case ISD::FADD: 2760 case ISD::FSUB: 2761 case ISD::FMUL: 2762 case ISD::FDIV: 2763 case ISD::FREM: { 2764 SDValue Op0 = Op.getOperand(0); 2765 SDValue Op1 = Op.getOperand(1); 2766 2767 APInt UndefRHS, ZeroRHS; 2768 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2769 Depth + 1)) 2770 return true; 2771 APInt UndefLHS, ZeroLHS; 2772 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2773 Depth + 1)) 2774 return true; 2775 2776 KnownZero = ZeroLHS & ZeroRHS; 2777 KnownUndef = getKnownUndefForVectorBinop(Op, TLO.DAG, UndefLHS, UndefRHS); 2778 2779 // Attempt to avoid multi-use ops if we don't need anything from them. 2780 // TODO - use KnownUndef to relax the demandedelts? 2781 if (!DemandedElts.isAllOnesValue()) 2782 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2783 return true; 2784 break; 2785 } 2786 case ISD::SHL: 2787 case ISD::SRL: 2788 case ISD::SRA: 2789 case ISD::ROTL: 2790 case ISD::ROTR: { 2791 SDValue Op0 = Op.getOperand(0); 2792 SDValue Op1 = Op.getOperand(1); 2793 2794 APInt UndefRHS, ZeroRHS; 2795 if (SimplifyDemandedVectorElts(Op1, DemandedElts, UndefRHS, ZeroRHS, TLO, 2796 Depth + 1)) 2797 return true; 2798 APInt UndefLHS, ZeroLHS; 2799 if (SimplifyDemandedVectorElts(Op0, DemandedElts, UndefLHS, ZeroLHS, TLO, 2800 Depth + 1)) 2801 return true; 2802 2803 KnownZero = ZeroLHS; 2804 KnownUndef = UndefLHS & UndefRHS; // TODO: use getKnownUndefForVectorBinop? 2805 2806 // Attempt to avoid multi-use ops if we don't need anything from them. 2807 // TODO - use KnownUndef to relax the demandedelts? 2808 if (!DemandedElts.isAllOnesValue()) 2809 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2810 return true; 2811 break; 2812 } 2813 case ISD::MUL: 2814 case ISD::AND: { 2815 SDValue Op0 = Op.getOperand(0); 2816 SDValue Op1 = Op.getOperand(1); 2817 2818 APInt SrcUndef, SrcZero; 2819 if (SimplifyDemandedVectorElts(Op1, DemandedElts, SrcUndef, SrcZero, TLO, 2820 Depth + 1)) 2821 return true; 2822 if (SimplifyDemandedVectorElts(Op0, DemandedElts, KnownUndef, KnownZero, 2823 TLO, Depth + 1)) 2824 return true; 2825 2826 // If either side has a zero element, then the result element is zero, even 2827 // if the other is an UNDEF. 2828 // TODO: Extend getKnownUndefForVectorBinop to also deal with known zeros 2829 // and then handle 'and' nodes with the rest of the binop opcodes. 2830 KnownZero |= SrcZero; 2831 KnownUndef &= SrcUndef; 2832 KnownUndef &= ~KnownZero; 2833 2834 // Attempt to avoid multi-use ops if we don't need anything from them. 2835 // TODO - use KnownUndef to relax the demandedelts? 2836 if (!DemandedElts.isAllOnesValue()) 2837 if (SimplifyDemandedVectorEltsBinOp(Op0, Op1)) 2838 return true; 2839 break; 2840 } 2841 case ISD::TRUNCATE: 2842 case ISD::SIGN_EXTEND: 2843 case ISD::ZERO_EXTEND: 2844 if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, KnownUndef, 2845 KnownZero, TLO, Depth + 1)) 2846 return true; 2847 2848 if (Op.getOpcode() == ISD::ZERO_EXTEND) { 2849 // zext(undef) upper bits are guaranteed to be zero. 
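  // So if every demanded element is undef the whole node folds to the zero
  // vector; otherwise clear KnownUndef, because zero-extending an undef
  // element still produces defined (zero) upper bits.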
2850 if (DemandedElts.isSubsetOf(KnownUndef)) 2851 return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT)); 2852 KnownUndef.clearAllBits(); 2853 } 2854 break; 2855 default: { 2856 if (Op.getOpcode() >= ISD::BUILTIN_OP_END) { 2857 if (SimplifyDemandedVectorEltsForTargetNode(Op, DemandedElts, KnownUndef, 2858 KnownZero, TLO, Depth)) 2859 return true; 2860 } else { 2861 KnownBits Known; 2862 APInt DemandedBits = APInt::getAllOnesValue(EltSizeInBits); 2863 if (SimplifyDemandedBits(Op, DemandedBits, OriginalDemandedElts, Known, 2864 TLO, Depth, AssumeSingleUse)) 2865 return true; 2866 } 2867 break; 2868 } 2869 } 2870 assert((KnownUndef & KnownZero) == 0 && "Elements flagged as undef AND zero"); 2871 2872 // Constant fold all undef cases. 2873 // TODO: Handle zero cases as well. 2874 if (DemandedElts.isSubsetOf(KnownUndef)) 2875 return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT)); 2876 2877 return false; 2878 } 2879 2880 /// Determine which of the bits specified in Mask are known to be either zero or 2881 /// one and return them in the Known. 2882 void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op, 2883 KnownBits &Known, 2884 const APInt &DemandedElts, 2885 const SelectionDAG &DAG, 2886 unsigned Depth) const { 2887 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2888 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2889 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2890 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2891 "Should use MaskedValueIsZero if you don't know whether Op" 2892 " is a target node!"); 2893 Known.resetAll(); 2894 } 2895 2896 void TargetLowering::computeKnownBitsForTargetInstr( 2897 GISelKnownBits &Analysis, Register R, KnownBits &Known, 2898 const APInt &DemandedElts, const MachineRegisterInfo &MRI, 2899 unsigned Depth) const { 2900 Known.resetAll(); 2901 } 2902 2903 void TargetLowering::computeKnownBitsForFrameIndex( 2904 const int FrameIdx, KnownBits &Known, const MachineFunction &MF) const { 2905 // The low bits are known zero if the pointer is aligned. 2906 Known.Zero.setLowBits(Log2(MF.getFrameInfo().getObjectAlign(FrameIdx))); 2907 } 2908 2909 Align TargetLowering::computeKnownAlignForTargetInstr( 2910 GISelKnownBits &Analysis, Register R, const MachineRegisterInfo &MRI, 2911 unsigned Depth) const { 2912 return Align(1); 2913 } 2914 2915 /// This method can be implemented by targets that want to expose additional 2916 /// information about sign bits to the DAG Combiner. 
2917 unsigned TargetLowering::ComputeNumSignBitsForTargetNode(SDValue Op, 2918 const APInt &, 2919 const SelectionDAG &, 2920 unsigned Depth) const { 2921 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2922 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2923 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2924 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2925 "Should use ComputeNumSignBits if you don't know whether Op" 2926 " is a target node!"); 2927 return 1; 2928 } 2929 2930 unsigned TargetLowering::computeNumSignBitsForTargetInstr( 2931 GISelKnownBits &Analysis, Register R, const APInt &DemandedElts, 2932 const MachineRegisterInfo &MRI, unsigned Depth) const { 2933 return 1; 2934 } 2935 2936 bool TargetLowering::SimplifyDemandedVectorEltsForTargetNode( 2937 SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero, 2938 TargetLoweringOpt &TLO, unsigned Depth) const { 2939 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2940 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2941 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2942 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2943 "Should use SimplifyDemandedVectorElts if you don't know whether Op" 2944 " is a target node!"); 2945 return false; 2946 } 2947 2948 bool TargetLowering::SimplifyDemandedBitsForTargetNode( 2949 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2950 KnownBits &Known, TargetLoweringOpt &TLO, unsigned Depth) const { 2951 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 2952 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2953 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2954 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2955 "Should use SimplifyDemandedBits if you don't know whether Op" 2956 " is a target node!"); 2957 computeKnownBitsForTargetNode(Op, Known, DemandedElts, TLO.DAG, Depth); 2958 return false; 2959 } 2960 2961 SDValue TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode( 2962 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts, 2963 SelectionDAG &DAG, unsigned Depth) const { 2964 assert( 2965 (Op.getOpcode() >= ISD::BUILTIN_OP_END || 2966 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 2967 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 2968 Op.getOpcode() == ISD::INTRINSIC_VOID) && 2969 "Should use SimplifyMultipleUseDemandedBits if you don't know whether Op" 2970 " is a target node!"); 2971 return SDValue(); 2972 } 2973 2974 SDValue 2975 TargetLowering::buildLegalVectorShuffle(EVT VT, const SDLoc &DL, SDValue N0, 2976 SDValue N1, MutableArrayRef<int> Mask, 2977 SelectionDAG &DAG) const { 2978 bool LegalMask = isShuffleMaskLegal(Mask, VT); 2979 if (!LegalMask) { 2980 std::swap(N0, N1); 2981 ShuffleVectorSDNode::commuteMask(Mask); 2982 LegalMask = isShuffleMaskLegal(Mask, VT); 2983 } 2984 2985 if (!LegalMask) 2986 return SDValue(); 2987 2988 return DAG.getVectorShuffle(VT, DL, N0, N1, Mask); 2989 } 2990 2991 const Constant *TargetLowering::getTargetConstantFromLoad(LoadSDNode*) const { 2992 return nullptr; 2993 } 2994 2995 bool TargetLowering::isKnownNeverNaNForTargetNode(SDValue Op, 2996 const SelectionDAG &DAG, 2997 bool SNaN, 2998 unsigned Depth) const { 2999 assert((Op.getOpcode() >= ISD::BUILTIN_OP_END || 3000 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN || 3001 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN || 3002 Op.getOpcode() == ISD::INTRINSIC_VOID) && 3003 "Should use isKnownNeverNaN if you don't know whether Op" 3004 " is a target node!"); 3005 return false; 3006 } 3007 3008 // FIXME: Ideally, this would use ISD::isConstantSplatVector(), but that must 3009 // work with 
truncating build vectors and vectors with elements of less than 3010 // 8 bits. 3011 bool TargetLowering::isConstTrueVal(const SDNode *N) const { 3012 if (!N) 3013 return false; 3014 3015 APInt CVal; 3016 if (auto *CN = dyn_cast<ConstantSDNode>(N)) { 3017 CVal = CN->getAPIntValue(); 3018 } else if (auto *BV = dyn_cast<BuildVectorSDNode>(N)) { 3019 auto *CN = BV->getConstantSplatNode(); 3020 if (!CN) 3021 return false; 3022 3023 // If this is a truncating build vector, truncate the splat value. 3024 // Otherwise, we may fail to match the expected values below. 3025 unsigned BVEltWidth = BV->getValueType(0).getScalarSizeInBits(); 3026 CVal = CN->getAPIntValue(); 3027 if (BVEltWidth < CVal.getBitWidth()) 3028 CVal = CVal.trunc(BVEltWidth); 3029 } else { 3030 return false; 3031 } 3032 3033 switch (getBooleanContents(N->getValueType(0))) { 3034 case UndefinedBooleanContent: 3035 return CVal[0]; 3036 case ZeroOrOneBooleanContent: 3037 return CVal.isOneValue(); 3038 case ZeroOrNegativeOneBooleanContent: 3039 return CVal.isAllOnesValue(); 3040 } 3041 3042 llvm_unreachable("Invalid boolean contents"); 3043 } 3044 3045 bool TargetLowering::isConstFalseVal(const SDNode *N) const { 3046 if (!N) 3047 return false; 3048 3049 const ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N); 3050 if (!CN) { 3051 const BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N); 3052 if (!BV) 3053 return false; 3054 3055 // Only interested in constant splats, we don't care about undef 3056 // elements in identifying boolean constants and getConstantSplatNode 3057 // returns NULL if all ops are undef; 3058 CN = BV->getConstantSplatNode(); 3059 if (!CN) 3060 return false; 3061 } 3062 3063 if (getBooleanContents(N->getValueType(0)) == UndefinedBooleanContent) 3064 return !CN->getAPIntValue()[0]; 3065 3066 return CN->isNullValue(); 3067 } 3068 3069 bool TargetLowering::isExtendedTrueVal(const ConstantSDNode *N, EVT VT, 3070 bool SExt) const { 3071 if (VT == MVT::i1) 3072 return N->isOne(); 3073 3074 TargetLowering::BooleanContent Cnt = getBooleanContents(VT); 3075 switch (Cnt) { 3076 case TargetLowering::ZeroOrOneBooleanContent: 3077 // An extended value of 1 is always true, unless its original type is i1, 3078 // in which case it will be sign extended to -1. 3079 return (N->isOne() && !SExt) || (SExt && (N->getValueType(0) != MVT::i1)); 3080 case TargetLowering::UndefinedBooleanContent: 3081 case TargetLowering::ZeroOrNegativeOneBooleanContent: 3082 return N->isAllOnesValue() && SExt; 3083 } 3084 llvm_unreachable("Unexpected enumeration."); 3085 } 3086 3087 /// This helper function of SimplifySetCC tries to optimize the comparison when 3088 /// either operand of the SetCC node is a bitwise-and instruction. 
3089 SDValue TargetLowering::foldSetCCWithAnd(EVT VT, SDValue N0, SDValue N1, 3090 ISD::CondCode Cond, const SDLoc &DL, 3091 DAGCombinerInfo &DCI) const { 3092 // Match these patterns in any of their permutations: 3093 // (X & Y) == Y 3094 // (X & Y) != Y 3095 if (N1.getOpcode() == ISD::AND && N0.getOpcode() != ISD::AND) 3096 std::swap(N0, N1); 3097 3098 EVT OpVT = N0.getValueType(); 3099 if (N0.getOpcode() != ISD::AND || !OpVT.isInteger() || 3100 (Cond != ISD::SETEQ && Cond != ISD::SETNE)) 3101 return SDValue(); 3102 3103 SDValue X, Y; 3104 if (N0.getOperand(0) == N1) { 3105 X = N0.getOperand(1); 3106 Y = N0.getOperand(0); 3107 } else if (N0.getOperand(1) == N1) { 3108 X = N0.getOperand(0); 3109 Y = N0.getOperand(1); 3110 } else { 3111 return SDValue(); 3112 } 3113 3114 SelectionDAG &DAG = DCI.DAG; 3115 SDValue Zero = DAG.getConstant(0, DL, OpVT); 3116 if (DAG.isKnownToBeAPowerOfTwo(Y)) { 3117 // Simplify X & Y == Y to X & Y != 0 if Y has exactly one bit set. 3118 // Note that where Y is variable and is known to have at most one bit set 3119 // (for example, if it is Z & 1) we cannot do this; the expressions are not 3120 // equivalent when Y == 0. 3121 assert(OpVT.isInteger()); 3122 Cond = ISD::getSetCCInverse(Cond, OpVT); 3123 if (DCI.isBeforeLegalizeOps() || 3124 isCondCodeLegal(Cond, N0.getSimpleValueType())) 3125 return DAG.getSetCC(DL, VT, N0, Zero, Cond); 3126 } else if (N0.hasOneUse() && hasAndNotCompare(Y)) { 3127 // If the target supports an 'and-not' or 'and-complement' logic operation, 3128 // try to use that to make a comparison operation more efficient. 3129 // But don't do this transform if the mask is a single bit because there are 3130 // more efficient ways to deal with that case (for example, 'bt' on x86 or 3131 // 'rlwinm' on PPC). 3132 3133 // Bail out if the compare operand that we want to turn into a zero is 3134 // already a zero (otherwise, infinite loop). 3135 auto *YConst = dyn_cast<ConstantSDNode>(Y); 3136 if (YConst && YConst->isNullValue()) 3137 return SDValue(); 3138 3139 // Transform this into: ~X & Y == 0. 3140 SDValue NotX = DAG.getNOT(SDLoc(X), X, OpVT); 3141 SDValue NewAnd = DAG.getNode(ISD::AND, SDLoc(N0), OpVT, NotX, Y); 3142 return DAG.getSetCC(DL, VT, NewAnd, Zero, Cond); 3143 } 3144 3145 return SDValue(); 3146 } 3147 3148 /// There are multiple IR patterns that could be checking whether certain 3149 /// truncation of a signed number would be lossy or not. The pattern which is 3150 /// best at IR level, may not lower optimally. Thus, we want to unfold it. 3151 /// We are looking for the following pattern: (KeptBits is a constant) 3152 /// (add %x, (1 << (KeptBits-1))) srccond (1 << KeptBits) 3153 /// KeptBits won't be bitwidth(x), that will be constant-folded to true/false. 3154 /// KeptBits also can't be 1, that would have been folded to %x dstcond 0 3155 /// We will unfold it into the natural trunc+sext pattern: 3156 /// ((%x << C) a>> C) dstcond %x 3157 /// Where C = bitwidth(x) - KeptBits and C u< bitwidth(x) 3158 SDValue TargetLowering::optimizeSetCCOfSignedTruncationCheck( 3159 EVT SCCVT, SDValue N0, SDValue N1, ISD::CondCode Cond, DAGCombinerInfo &DCI, 3160 const SDLoc &DL) const { 3161 // We must be comparing with a constant. 3162 ConstantSDNode *C1; 3163 if (!(C1 = dyn_cast<ConstantSDNode>(N1))) 3164 return SDValue(); 3165 3166 // N0 should be: add %x, (1 << (KeptBits-1)) 3167 if (N0->getOpcode() != ISD::ADD) 3168 return SDValue(); 3169 3170 // And we must be 'add'ing a constant. 
3171 ConstantSDNode *C01; 3172 if (!(C01 = dyn_cast<ConstantSDNode>(N0->getOperand(1)))) 3173 return SDValue(); 3174 3175 SDValue X = N0->getOperand(0); 3176 EVT XVT = X.getValueType(); 3177 3178 // Validate constants ... 3179 3180 APInt I1 = C1->getAPIntValue(); 3181 3182 ISD::CondCode NewCond; 3183 if (Cond == ISD::CondCode::SETULT) { 3184 NewCond = ISD::CondCode::SETEQ; 3185 } else if (Cond == ISD::CondCode::SETULE) { 3186 NewCond = ISD::CondCode::SETEQ; 3187 // But need to 'canonicalize' the constant. 3188 I1 += 1; 3189 } else if (Cond == ISD::CondCode::SETUGT) { 3190 NewCond = ISD::CondCode::SETNE; 3191 // But need to 'canonicalize' the constant. 3192 I1 += 1; 3193 } else if (Cond == ISD::CondCode::SETUGE) { 3194 NewCond = ISD::CondCode::SETNE; 3195 } else 3196 return SDValue(); 3197 3198 APInt I01 = C01->getAPIntValue(); 3199 3200 auto checkConstants = [&I1, &I01]() -> bool { 3201 // Both of them must be power-of-two, and the constant from setcc is bigger. 3202 return I1.ugt(I01) && I1.isPowerOf2() && I01.isPowerOf2(); 3203 }; 3204 3205 if (checkConstants()) { 3206 // Great, e.g. got icmp ult i16 (add i16 %x, 128), 256 3207 } else { 3208 // What if we invert constants? (and the target predicate) 3209 I1.negate(); 3210 I01.negate(); 3211 assert(XVT.isInteger()); 3212 NewCond = getSetCCInverse(NewCond, XVT); 3213 if (!checkConstants()) 3214 return SDValue(); 3215 // Great, e.g. got icmp uge i16 (add i16 %x, -128), -256 3216 } 3217 3218 // They are power-of-two, so which bit is set? 3219 const unsigned KeptBits = I1.logBase2(); 3220 const unsigned KeptBitsMinusOne = I01.logBase2(); 3221 3222 // Magic! 3223 if (KeptBits != (KeptBitsMinusOne + 1)) 3224 return SDValue(); 3225 assert(KeptBits > 0 && KeptBits < XVT.getSizeInBits() && "unreachable"); 3226 3227 // We don't want to do this in every single case. 3228 SelectionDAG &DAG = DCI.DAG; 3229 if (!DAG.getTargetLoweringInfo().shouldTransformSignedTruncationCheck( 3230 XVT, KeptBits)) 3231 return SDValue(); 3232 3233 const unsigned MaskedBits = XVT.getSizeInBits() - KeptBits; 3234 assert(MaskedBits > 0 && MaskedBits < XVT.getSizeInBits() && "unreachable"); 3235 3236 // Unfold into: ((%x << C) a>> C) cond %x 3237 // Where 'cond' will be either 'eq' or 'ne'. 3238 SDValue ShiftAmt = DAG.getConstant(MaskedBits, DL, XVT); 3239 SDValue T0 = DAG.getNode(ISD::SHL, DL, XVT, X, ShiftAmt); 3240 SDValue T1 = DAG.getNode(ISD::SRA, DL, XVT, T0, ShiftAmt); 3241 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, X, NewCond); 3242 3243 return T2; 3244 } 3245 3246 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3247 SDValue TargetLowering::optimizeSetCCByHoistingAndByConstFromLogicalShift( 3248 EVT SCCVT, SDValue N0, SDValue N1C, ISD::CondCode Cond, 3249 DAGCombinerInfo &DCI, const SDLoc &DL) const { 3250 assert(isConstOrConstSplat(N1C) && 3251 isConstOrConstSplat(N1C)->getAPIntValue().isNullValue() && 3252 "Should be a comparison with 0."); 3253 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3254 "Valid only for [in]equality comparisons."); 3255 3256 unsigned NewShiftOpcode; 3257 SDValue X, C, Y; 3258 3259 SelectionDAG &DAG = DCI.DAG; 3260 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3261 3262 // Look for '(C l>>/<< Y)'. 3263 auto Match = [&NewShiftOpcode, &X, &C, &Y, &TLI, &DAG](SDValue V) { 3264 // The shift should be one-use. 
3265 if (!V.hasOneUse()) 3266 return false; 3267 unsigned OldShiftOpcode = V.getOpcode(); 3268 switch (OldShiftOpcode) { 3269 case ISD::SHL: 3270 NewShiftOpcode = ISD::SRL; 3271 break; 3272 case ISD::SRL: 3273 NewShiftOpcode = ISD::SHL; 3274 break; 3275 default: 3276 return false; // must be a logical shift. 3277 } 3278 // We should be shifting a constant. 3279 // FIXME: best to use isConstantOrConstantVector(). 3280 C = V.getOperand(0); 3281 ConstantSDNode *CC = 3282 isConstOrConstSplat(C, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3283 if (!CC) 3284 return false; 3285 Y = V.getOperand(1); 3286 3287 ConstantSDNode *XC = 3288 isConstOrConstSplat(X, /*AllowUndefs=*/true, /*AllowTruncation=*/true); 3289 return TLI.shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd( 3290 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG); 3291 }; 3292 3293 // LHS of comparison should be an one-use 'and'. 3294 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse()) 3295 return SDValue(); 3296 3297 X = N0.getOperand(0); 3298 SDValue Mask = N0.getOperand(1); 3299 3300 // 'and' is commutative! 3301 if (!Match(Mask)) { 3302 std::swap(X, Mask); 3303 if (!Match(Mask)) 3304 return SDValue(); 3305 } 3306 3307 EVT VT = X.getValueType(); 3308 3309 // Produce: 3310 // ((X 'OppositeShiftOpcode' Y) & C) Cond 0 3311 SDValue T0 = DAG.getNode(NewShiftOpcode, DL, VT, X, Y); 3312 SDValue T1 = DAG.getNode(ISD::AND, DL, VT, T0, C); 3313 SDValue T2 = DAG.getSetCC(DL, SCCVT, T1, N1C, Cond); 3314 return T2; 3315 } 3316 3317 /// Try to fold an equality comparison with a {add/sub/xor} binary operation as 3318 /// the 1st operand (N0). Callers are expected to swap the N0/N1 parameters to 3319 /// handle the commuted versions of these patterns. 3320 SDValue TargetLowering::foldSetCCWithBinOp(EVT VT, SDValue N0, SDValue N1, 3321 ISD::CondCode Cond, const SDLoc &DL, 3322 DAGCombinerInfo &DCI) const { 3323 unsigned BOpcode = N0.getOpcode(); 3324 assert((BOpcode == ISD::ADD || BOpcode == ISD::SUB || BOpcode == ISD::XOR) && 3325 "Unexpected binop"); 3326 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && "Unexpected condcode"); 3327 3328 // (X + Y) == X --> Y == 0 3329 // (X - Y) == X --> Y == 0 3330 // (X ^ Y) == X --> Y == 0 3331 SelectionDAG &DAG = DCI.DAG; 3332 EVT OpVT = N0.getValueType(); 3333 SDValue X = N0.getOperand(0); 3334 SDValue Y = N0.getOperand(1); 3335 if (X == N1) 3336 return DAG.getSetCC(DL, VT, Y, DAG.getConstant(0, DL, OpVT), Cond); 3337 3338 if (Y != N1) 3339 return SDValue(); 3340 3341 // (X + Y) == Y --> X == 0 3342 // (X ^ Y) == Y --> X == 0 3343 if (BOpcode == ISD::ADD || BOpcode == ISD::XOR) 3344 return DAG.getSetCC(DL, VT, X, DAG.getConstant(0, DL, OpVT), Cond); 3345 3346 // The shift would not be valid if the operands are boolean (i1). 3347 if (!N0.hasOneUse() || OpVT.getScalarSizeInBits() == 1) 3348 return SDValue(); 3349 3350 // (X - Y) == Y --> X == Y << 1 3351 EVT ShiftVT = getShiftAmountTy(OpVT, DAG.getDataLayout(), 3352 !DCI.isBeforeLegalize()); 3353 SDValue One = DAG.getConstant(1, DL, ShiftVT); 3354 SDValue YShl1 = DAG.getNode(ISD::SHL, DL, N1.getValueType(), Y, One); 3355 if (!DCI.isCalledByLegalizer()) 3356 DCI.AddToWorklist(YShl1.getNode()); 3357 return DAG.getSetCC(DL, VT, X, YShl1, Cond); 3358 } 3359 3360 /// Try to simplify a setcc built with the specified operands and cc. If it is 3361 /// unable to simplify it, return a null SDValue. 
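/// The folds below include canonicalizing constants onto the RHS, narrowing
/// comparisons of zero/sign-extended operands, rewriting ctpop/urem/srem based
/// equality tests, adjusting condition codes to shrink immediates, and
/// reducing boolean (i1) comparisons to plain logic operations.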
3362 SDValue TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, 3363 ISD::CondCode Cond, bool foldBooleans, 3364 DAGCombinerInfo &DCI, 3365 const SDLoc &dl) const { 3366 SelectionDAG &DAG = DCI.DAG; 3367 const DataLayout &Layout = DAG.getDataLayout(); 3368 EVT OpVT = N0.getValueType(); 3369 3370 // Constant fold or commute setcc. 3371 if (SDValue Fold = DAG.FoldSetCC(VT, N0, N1, Cond, dl)) 3372 return Fold; 3373 3374 // Ensure that the constant occurs on the RHS and fold constant comparisons. 3375 // TODO: Handle non-splat vector constants. All undef causes trouble. 3376 ISD::CondCode SwappedCC = ISD::getSetCCSwappedOperands(Cond); 3377 if (isConstOrConstSplat(N0) && 3378 (DCI.isBeforeLegalizeOps() || 3379 isCondCodeLegal(SwappedCC, N0.getSimpleValueType()))) 3380 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3381 3382 // If we have a subtract with the same 2 non-constant operands as this setcc 3383 // -- but in reverse order -- then try to commute the operands of this setcc 3384 // to match. A matching pair of setcc (cmp) and sub may be combined into 1 3385 // instruction on some targets. 3386 if (!isConstOrConstSplat(N0) && !isConstOrConstSplat(N1) && 3387 (DCI.isBeforeLegalizeOps() || 3388 isCondCodeLegal(SwappedCC, N0.getSimpleValueType())) && 3389 DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N1, N0 } ) && 3390 !DAG.getNodeIfExists(ISD::SUB, DAG.getVTList(OpVT), { N0, N1 } )) 3391 return DAG.getSetCC(dl, VT, N1, N0, SwappedCC); 3392 3393 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3394 const APInt &C1 = N1C->getAPIntValue(); 3395 3396 // If the LHS is '(srl (ctlz x), 5)', the RHS is 0/1, and this is an 3397 // equality comparison, then we're just comparing whether X itself is 3398 // zero. 3399 if (N0.getOpcode() == ISD::SRL && (C1.isNullValue() || C1.isOneValue()) && 3400 N0.getOperand(0).getOpcode() == ISD::CTLZ && 3401 N0.getOperand(1).getOpcode() == ISD::Constant) { 3402 const APInt &ShAmt = N0.getConstantOperandAPInt(1); 3403 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3404 ShAmt == Log2_32(N0.getValueSizeInBits())) { 3405 if ((C1 == 0) == (Cond == ISD::SETEQ)) { 3406 // (srl (ctlz x), 5) == 0 -> X != 0 3407 // (srl (ctlz x), 5) != 1 -> X != 0 3408 Cond = ISD::SETNE; 3409 } else { 3410 // (srl (ctlz x), 5) != 0 -> X == 0 3411 // (srl (ctlz x), 5) == 1 -> X == 0 3412 Cond = ISD::SETEQ; 3413 } 3414 SDValue Zero = DAG.getConstant(0, dl, N0.getValueType()); 3415 return DAG.getSetCC(dl, VT, N0.getOperand(0).getOperand(0), 3416 Zero, Cond); 3417 } 3418 } 3419 3420 SDValue CTPOP = N0; 3421 // Look through truncs that don't change the value of a ctpop. 3422 if (N0.hasOneUse() && N0.getOpcode() == ISD::TRUNCATE) 3423 CTPOP = N0.getOperand(0); 3424 3425 if (CTPOP.hasOneUse() && CTPOP.getOpcode() == ISD::CTPOP && 3426 (N0 == CTPOP || 3427 N0.getValueSizeInBits() > Log2_32_Ceil(CTPOP.getValueSizeInBits()))) { 3428 EVT CTVT = CTPOP.getValueType(); 3429 SDValue CTOp = CTPOP.getOperand(0); 3430 3431 // (ctpop x) u< 2 -> (x & x-1) == 0 3432 // (ctpop x) u> 1 -> (x & x-1) != 0 3433 if ((Cond == ISD::SETULT && C1 == 2) || (Cond == ISD::SETUGT && C1 == 1)){ 3434 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3435 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3436 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3437 ISD::CondCode CC = Cond == ISD::SETULT ? 
ISD::SETEQ : ISD::SETNE; 3438 return DAG.getSetCC(dl, VT, And, DAG.getConstant(0, dl, CTVT), CC); 3439 } 3440 3441 // If ctpop is not supported, expand a power-of-2 comparison based on it. 3442 if (C1 == 1 && !isOperationLegalOrCustom(ISD::CTPOP, CTVT) && 3443 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3444 // (ctpop x) == 1 --> (x != 0) && ((x & x-1) == 0) 3445 // (ctpop x) != 1 --> (x == 0) || ((x & x-1) != 0) 3446 SDValue Zero = DAG.getConstant(0, dl, CTVT); 3447 SDValue NegOne = DAG.getAllOnesConstant(dl, CTVT); 3448 assert(CTVT.isInteger()); 3449 ISD::CondCode InvCond = ISD::getSetCCInverse(Cond, CTVT); 3450 SDValue Add = DAG.getNode(ISD::ADD, dl, CTVT, CTOp, NegOne); 3451 SDValue And = DAG.getNode(ISD::AND, dl, CTVT, CTOp, Add); 3452 SDValue LHS = DAG.getSetCC(dl, VT, CTOp, Zero, InvCond); 3453 SDValue RHS = DAG.getSetCC(dl, VT, And, Zero, Cond); 3454 unsigned LogicOpcode = Cond == ISD::SETEQ ? ISD::AND : ISD::OR; 3455 return DAG.getNode(LogicOpcode, dl, VT, LHS, RHS); 3456 } 3457 } 3458 3459 // (zext x) == C --> x == (trunc C) 3460 // (sext x) == C --> x == (trunc C) 3461 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3462 DCI.isBeforeLegalize() && N0->hasOneUse()) { 3463 unsigned MinBits = N0.getValueSizeInBits(); 3464 SDValue PreExt; 3465 bool Signed = false; 3466 if (N0->getOpcode() == ISD::ZERO_EXTEND) { 3467 // ZExt 3468 MinBits = N0->getOperand(0).getValueSizeInBits(); 3469 PreExt = N0->getOperand(0); 3470 } else if (N0->getOpcode() == ISD::AND) { 3471 // DAGCombine turns costly ZExts into ANDs 3472 if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) 3473 if ((C->getAPIntValue()+1).isPowerOf2()) { 3474 MinBits = C->getAPIntValue().countTrailingOnes(); 3475 PreExt = N0->getOperand(0); 3476 } 3477 } else if (N0->getOpcode() == ISD::SIGN_EXTEND) { 3478 // SExt 3479 MinBits = N0->getOperand(0).getValueSizeInBits(); 3480 PreExt = N0->getOperand(0); 3481 Signed = true; 3482 } else if (auto *LN0 = dyn_cast<LoadSDNode>(N0)) { 3483 // ZEXTLOAD / SEXTLOAD 3484 if (LN0->getExtensionType() == ISD::ZEXTLOAD) { 3485 MinBits = LN0->getMemoryVT().getSizeInBits(); 3486 PreExt = N0; 3487 } else if (LN0->getExtensionType() == ISD::SEXTLOAD) { 3488 Signed = true; 3489 MinBits = LN0->getMemoryVT().getSizeInBits(); 3490 PreExt = N0; 3491 } 3492 } 3493 3494 // Figure out how many bits we need to preserve this constant. 3495 unsigned ReqdBits = Signed ? 3496 C1.getBitWidth() - C1.getNumSignBits() + 1 : 3497 C1.getActiveBits(); 3498 3499 // Make sure we're not losing bits from the constant. 3500 if (MinBits > 0 && 3501 MinBits < C1.getBitWidth() && 3502 MinBits >= ReqdBits) { 3503 EVT MinVT = EVT::getIntegerVT(*DAG.getContext(), MinBits); 3504 if (isTypeDesirableForOp(ISD::SETCC, MinVT)) { 3505 // Will get folded away. 3506 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, MinVT, PreExt); 3507 if (MinBits == 1 && C1 == 1) 3508 // Invert the condition. 3509 return DAG.getSetCC(dl, VT, Trunc, DAG.getConstant(0, dl, MVT::i1), 3510 Cond == ISD::SETEQ ? 
ISD::SETNE : ISD::SETEQ); 3511 SDValue C = DAG.getConstant(C1.trunc(MinBits), dl, MinVT); 3512 return DAG.getSetCC(dl, VT, Trunc, C, Cond); 3513 } 3514 3515 // If truncating the setcc operands is not desirable, we can still 3516 // simplify the expression in some cases: 3517 // setcc ([sz]ext (setcc x, y, cc)), 0, setne) -> setcc (x, y, cc) 3518 // setcc ([sz]ext (setcc x, y, cc)), 0, seteq) -> setcc (x, y, inv(cc)) 3519 // setcc (zext (setcc x, y, cc)), 1, setne) -> setcc (x, y, inv(cc)) 3520 // setcc (zext (setcc x, y, cc)), 1, seteq) -> setcc (x, y, cc) 3521 // setcc (sext (setcc x, y, cc)), -1, setne) -> setcc (x, y, inv(cc)) 3522 // setcc (sext (setcc x, y, cc)), -1, seteq) -> setcc (x, y, cc) 3523 SDValue TopSetCC = N0->getOperand(0); 3524 unsigned N0Opc = N0->getOpcode(); 3525 bool SExt = (N0Opc == ISD::SIGN_EXTEND); 3526 if (TopSetCC.getValueType() == MVT::i1 && VT == MVT::i1 && 3527 TopSetCC.getOpcode() == ISD::SETCC && 3528 (N0Opc == ISD::ZERO_EXTEND || N0Opc == ISD::SIGN_EXTEND) && 3529 (isConstFalseVal(N1C) || 3530 isExtendedTrueVal(N1C, N0->getValueType(0), SExt))) { 3531 3532 bool Inverse = (N1C->isNullValue() && Cond == ISD::SETEQ) || 3533 (!N1C->isNullValue() && Cond == ISD::SETNE); 3534 3535 if (!Inverse) 3536 return TopSetCC; 3537 3538 ISD::CondCode InvCond = ISD::getSetCCInverse( 3539 cast<CondCodeSDNode>(TopSetCC.getOperand(2))->get(), 3540 TopSetCC.getOperand(0).getValueType()); 3541 return DAG.getSetCC(dl, VT, TopSetCC.getOperand(0), 3542 TopSetCC.getOperand(1), 3543 InvCond); 3544 } 3545 } 3546 } 3547 3548 // If the LHS is '(and load, const)', the RHS is 0, the test is for 3549 // equality or unsigned, and all 1 bits of the const are in the same 3550 // partial word, see if we can shorten the load. 3551 if (DCI.isBeforeLegalize() && 3552 !ISD::isSignedIntSetCC(Cond) && 3553 N0.getOpcode() == ISD::AND && C1 == 0 && 3554 N0.getNode()->hasOneUse() && 3555 isa<LoadSDNode>(N0.getOperand(0)) && 3556 N0.getOperand(0).getNode()->hasOneUse() && 3557 isa<ConstantSDNode>(N0.getOperand(1))) { 3558 LoadSDNode *Lod = cast<LoadSDNode>(N0.getOperand(0)); 3559 APInt bestMask; 3560 unsigned bestWidth = 0, bestOffset = 0; 3561 if (Lod->isSimple() && Lod->isUnindexed()) { 3562 unsigned origWidth = N0.getValueSizeInBits(); 3563 unsigned maskWidth = origWidth; 3564 // We can narrow (e.g.) 16-bit extending loads on 32-bit target to 3565 // 8 bits, but have to be careful... 
3566 if (Lod->getExtensionType() != ISD::NON_EXTLOAD) 3567 origWidth = Lod->getMemoryVT().getSizeInBits(); 3568 const APInt &Mask = N0.getConstantOperandAPInt(1); 3569 for (unsigned width = origWidth / 2; width>=8; width /= 2) { 3570 APInt newMask = APInt::getLowBitsSet(maskWidth, width); 3571 for (unsigned offset=0; offset<origWidth/width; offset++) { 3572 if (Mask.isSubsetOf(newMask)) { 3573 if (Layout.isLittleEndian()) 3574 bestOffset = (uint64_t)offset * (width/8); 3575 else 3576 bestOffset = (origWidth/width - offset - 1) * (width/8); 3577 bestMask = Mask.lshr(offset * (width/8) * 8); 3578 bestWidth = width; 3579 break; 3580 } 3581 newMask <<= width; 3582 } 3583 } 3584 } 3585 if (bestWidth) { 3586 EVT newVT = EVT::getIntegerVT(*DAG.getContext(), bestWidth); 3587 if (newVT.isRound() && 3588 shouldReduceLoadWidth(Lod, ISD::NON_EXTLOAD, newVT)) { 3589 SDValue Ptr = Lod->getBasePtr(); 3590 if (bestOffset != 0) 3591 Ptr = DAG.getMemBasePlusOffset(Ptr, bestOffset, dl); 3592 unsigned NewAlign = MinAlign(Lod->getAlignment(), bestOffset); 3593 SDValue NewLoad = DAG.getLoad( 3594 newVT, dl, Lod->getChain(), Ptr, 3595 Lod->getPointerInfo().getWithOffset(bestOffset), NewAlign); 3596 return DAG.getSetCC(dl, VT, 3597 DAG.getNode(ISD::AND, dl, newVT, NewLoad, 3598 DAG.getConstant(bestMask.trunc(bestWidth), 3599 dl, newVT)), 3600 DAG.getConstant(0LL, dl, newVT), Cond); 3601 } 3602 } 3603 } 3604 3605 // If the LHS is a ZERO_EXTEND, perform the comparison on the input. 3606 if (N0.getOpcode() == ISD::ZERO_EXTEND) { 3607 unsigned InSize = N0.getOperand(0).getValueSizeInBits(); 3608 3609 // If the comparison constant has bits in the upper part, the 3610 // zero-extended value could never match. 3611 if (C1.intersects(APInt::getHighBitsSet(C1.getBitWidth(), 3612 C1.getBitWidth() - InSize))) { 3613 switch (Cond) { 3614 case ISD::SETUGT: 3615 case ISD::SETUGE: 3616 case ISD::SETEQ: 3617 return DAG.getConstant(0, dl, VT); 3618 case ISD::SETULT: 3619 case ISD::SETULE: 3620 case ISD::SETNE: 3621 return DAG.getConstant(1, dl, VT); 3622 case ISD::SETGT: 3623 case ISD::SETGE: 3624 // True if the sign bit of C1 is set. 3625 return DAG.getConstant(C1.isNegative(), dl, VT); 3626 case ISD::SETLT: 3627 case ISD::SETLE: 3628 // True if the sign bit of C1 isn't set. 3629 return DAG.getConstant(C1.isNonNegative(), dl, VT); 3630 default: 3631 break; 3632 } 3633 } 3634 3635 // Otherwise, we can perform the comparison with the low bits. 
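      // For example, (setcc (zext i8 %x to i32), 200, ult) becomes
      // (setcc i8 %x, 200, ult): the constant was already known to have no
      // bits above the narrow type, so the truncation below is lossless.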
3636 switch (Cond) { 3637 case ISD::SETEQ: 3638 case ISD::SETNE: 3639 case ISD::SETUGT: 3640 case ISD::SETUGE: 3641 case ISD::SETULT: 3642 case ISD::SETULE: { 3643 EVT newVT = N0.getOperand(0).getValueType(); 3644 if (DCI.isBeforeLegalizeOps() || 3645 (isOperationLegal(ISD::SETCC, newVT) && 3646 isCondCodeLegal(Cond, newVT.getSimpleVT()))) { 3647 EVT NewSetCCVT = getSetCCResultType(Layout, *DAG.getContext(), newVT); 3648 SDValue NewConst = DAG.getConstant(C1.trunc(InSize), dl, newVT); 3649 3650 SDValue NewSetCC = DAG.getSetCC(dl, NewSetCCVT, N0.getOperand(0), 3651 NewConst, Cond); 3652 return DAG.getBoolExtOrTrunc(NewSetCC, dl, VT, N0.getValueType()); 3653 } 3654 break; 3655 } 3656 default: 3657 break; // todo, be more careful with signed comparisons 3658 } 3659 } else if (N0.getOpcode() == ISD::SIGN_EXTEND_INREG && 3660 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3661 EVT ExtSrcTy = cast<VTSDNode>(N0.getOperand(1))->getVT(); 3662 unsigned ExtSrcTyBits = ExtSrcTy.getSizeInBits(); 3663 EVT ExtDstTy = N0.getValueType(); 3664 unsigned ExtDstTyBits = ExtDstTy.getSizeInBits(); 3665 3666 // If the constant doesn't fit into the number of bits for the source of 3667 // the sign extension, it is impossible for both sides to be equal. 3668 if (C1.getMinSignedBits() > ExtSrcTyBits) 3669 return DAG.getConstant(Cond == ISD::SETNE, dl, VT); 3670 3671 SDValue ZextOp; 3672 EVT Op0Ty = N0.getOperand(0).getValueType(); 3673 if (Op0Ty == ExtSrcTy) { 3674 ZextOp = N0.getOperand(0); 3675 } else { 3676 APInt Imm = APInt::getLowBitsSet(ExtDstTyBits, ExtSrcTyBits); 3677 ZextOp = DAG.getNode(ISD::AND, dl, Op0Ty, N0.getOperand(0), 3678 DAG.getConstant(Imm, dl, Op0Ty)); 3679 } 3680 if (!DCI.isCalledByLegalizer()) 3681 DCI.AddToWorklist(ZextOp.getNode()); 3682 // Otherwise, make this a use of a zext. 3683 return DAG.getSetCC(dl, VT, ZextOp, 3684 DAG.getConstant(C1 & APInt::getLowBitsSet( 3685 ExtDstTyBits, 3686 ExtSrcTyBits), 3687 dl, ExtDstTy), 3688 Cond); 3689 } else if ((N1C->isNullValue() || N1C->isOne()) && 3690 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3691 // SETCC (SETCC), [0|1], [EQ|NE] -> SETCC 3692 if (N0.getOpcode() == ISD::SETCC && 3693 isTypeLegal(VT) && VT.bitsLE(N0.getValueType()) && 3694 (N0.getValueType() == MVT::i1 || 3695 getBooleanContents(N0.getOperand(0).getValueType()) == 3696 ZeroOrOneBooleanContent)) { 3697 bool TrueWhenTrue = (Cond == ISD::SETEQ) ^ (!N1C->isOne()); 3698 if (TrueWhenTrue) 3699 return DAG.getNode(ISD::TRUNCATE, dl, VT, N0); 3700 // Invert the condition. 3701 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get(); 3702 CC = ISD::getSetCCInverse(CC, N0.getOperand(0).getValueType()); 3703 if (DCI.isBeforeLegalizeOps() || 3704 isCondCodeLegal(CC, N0.getOperand(0).getSimpleValueType())) 3705 return DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC); 3706 } 3707 3708 if ((N0.getOpcode() == ISD::XOR || 3709 (N0.getOpcode() == ISD::AND && 3710 N0.getOperand(0).getOpcode() == ISD::XOR && 3711 N0.getOperand(1) == N0.getOperand(0).getOperand(1))) && 3712 isa<ConstantSDNode>(N0.getOperand(1)) && 3713 cast<ConstantSDNode>(N0.getOperand(1))->isOne()) { 3714 // If this is (X^1) == 0/1, swap the RHS and eliminate the xor. We 3715 // can only do this if the top bits are known zero. 3716 unsigned BitWidth = N0.getValueSizeInBits(); 3717 if (DAG.MaskedValueIsZero(N0, 3718 APInt::getHighBitsSet(BitWidth, 3719 BitWidth-1))) { 3720 // Okay, get the un-inverted input value. 
3721 SDValue Val; 3722 if (N0.getOpcode() == ISD::XOR) { 3723 Val = N0.getOperand(0); 3724 } else { 3725 assert(N0.getOpcode() == ISD::AND && 3726 N0.getOperand(0).getOpcode() == ISD::XOR); 3727 // ((X^1)&1)^1 -> X & 1 3728 Val = DAG.getNode(ISD::AND, dl, N0.getValueType(), 3729 N0.getOperand(0).getOperand(0), 3730 N0.getOperand(1)); 3731 } 3732 3733 return DAG.getSetCC(dl, VT, Val, N1, 3734 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3735 } 3736 } else if (N1C->isOne()) { 3737 SDValue Op0 = N0; 3738 if (Op0.getOpcode() == ISD::TRUNCATE) 3739 Op0 = Op0.getOperand(0); 3740 3741 if ((Op0.getOpcode() == ISD::XOR) && 3742 Op0.getOperand(0).getOpcode() == ISD::SETCC && 3743 Op0.getOperand(1).getOpcode() == ISD::SETCC) { 3744 SDValue XorLHS = Op0.getOperand(0); 3745 SDValue XorRHS = Op0.getOperand(1); 3746 // Ensure that the input setccs return an i1 type or 0/1 value. 3747 if (Op0.getValueType() == MVT::i1 || 3748 (getBooleanContents(XorLHS.getOperand(0).getValueType()) == 3749 ZeroOrOneBooleanContent && 3750 getBooleanContents(XorRHS.getOperand(0).getValueType()) == 3751 ZeroOrOneBooleanContent)) { 3752 // (xor (setcc), (setcc)) == / != 1 -> (setcc) != / == (setcc) 3753 Cond = (Cond == ISD::SETEQ) ? ISD::SETNE : ISD::SETEQ; 3754 return DAG.getSetCC(dl, VT, XorLHS, XorRHS, Cond); 3755 } 3756 } 3757 if (Op0.getOpcode() == ISD::AND && 3758 isa<ConstantSDNode>(Op0.getOperand(1)) && 3759 cast<ConstantSDNode>(Op0.getOperand(1))->isOne()) { 3760 // If this is (X&1) == / != 1, normalize it to (X&1) != / == 0. 3761 if (Op0.getValueType().bitsGT(VT)) 3762 Op0 = DAG.getNode(ISD::AND, dl, VT, 3763 DAG.getNode(ISD::TRUNCATE, dl, VT, Op0.getOperand(0)), 3764 DAG.getConstant(1, dl, VT)); 3765 else if (Op0.getValueType().bitsLT(VT)) 3766 Op0 = DAG.getNode(ISD::AND, dl, VT, 3767 DAG.getNode(ISD::ANY_EXTEND, dl, VT, Op0.getOperand(0)), 3768 DAG.getConstant(1, dl, VT)); 3769 3770 return DAG.getSetCC(dl, VT, Op0, 3771 DAG.getConstant(0, dl, Op0.getValueType()), 3772 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3773 } 3774 if (Op0.getOpcode() == ISD::AssertZext && 3775 cast<VTSDNode>(Op0.getOperand(1))->getVT() == MVT::i1) 3776 return DAG.getSetCC(dl, VT, Op0, 3777 DAG.getConstant(0, dl, Op0.getValueType()), 3778 Cond == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ); 3779 } 3780 } 3781 3782 // Given: 3783 // icmp eq/ne (urem %x, %y), 0 3784 // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem': 3785 // icmp eq/ne %x, 0 3786 if (N0.getOpcode() == ISD::UREM && N1C->isNullValue() && 3787 (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 3788 KnownBits XKnown = DAG.computeKnownBits(N0.getOperand(0)); 3789 KnownBits YKnown = DAG.computeKnownBits(N0.getOperand(1)); 3790 if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2) 3791 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1, Cond); 3792 } 3793 3794 if (SDValue V = 3795 optimizeSetCCOfSignedTruncationCheck(VT, N0, N1, Cond, DCI, dl)) 3796 return V; 3797 } 3798 3799 // These simplifications apply to splat vectors as well. 3800 // TODO: Handle more splat vector cases. 
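  // The splat-aware folds below canonicalize GE/LE into GT/LT, fold
  // comparisons against the minimum/maximum representable value to constants,
  // and rewrite borderline cases such as (setult X, MIN+1) into (seteq X, MIN).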
3801 if (auto *N1C = isConstOrConstSplat(N1)) { 3802 const APInt &C1 = N1C->getAPIntValue(); 3803 3804 APInt MinVal, MaxVal; 3805 unsigned OperandBitSize = N1C->getValueType(0).getScalarSizeInBits(); 3806 if (ISD::isSignedIntSetCC(Cond)) { 3807 MinVal = APInt::getSignedMinValue(OperandBitSize); 3808 MaxVal = APInt::getSignedMaxValue(OperandBitSize); 3809 } else { 3810 MinVal = APInt::getMinValue(OperandBitSize); 3811 MaxVal = APInt::getMaxValue(OperandBitSize); 3812 } 3813 3814 // Canonicalize GE/LE comparisons to use GT/LT comparisons. 3815 if (Cond == ISD::SETGE || Cond == ISD::SETUGE) { 3816 // X >= MIN --> true 3817 if (C1 == MinVal) 3818 return DAG.getBoolConstant(true, dl, VT, OpVT); 3819 3820 if (!VT.isVector()) { // TODO: Support this for vectors. 3821 // X >= C0 --> X > (C0 - 1) 3822 APInt C = C1 - 1; 3823 ISD::CondCode NewCC = (Cond == ISD::SETGE) ? ISD::SETGT : ISD::SETUGT; 3824 if ((DCI.isBeforeLegalizeOps() || 3825 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3826 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3827 isLegalICmpImmediate(C.getSExtValue())))) { 3828 return DAG.getSetCC(dl, VT, N0, 3829 DAG.getConstant(C, dl, N1.getValueType()), 3830 NewCC); 3831 } 3832 } 3833 } 3834 3835 if (Cond == ISD::SETLE || Cond == ISD::SETULE) { 3836 // X <= MAX --> true 3837 if (C1 == MaxVal) 3838 return DAG.getBoolConstant(true, dl, VT, OpVT); 3839 3840 // X <= C0 --> X < (C0 + 1) 3841 if (!VT.isVector()) { // TODO: Support this for vectors. 3842 APInt C = C1 + 1; 3843 ISD::CondCode NewCC = (Cond == ISD::SETLE) ? ISD::SETLT : ISD::SETULT; 3844 if ((DCI.isBeforeLegalizeOps() || 3845 isCondCodeLegal(NewCC, VT.getSimpleVT())) && 3846 (!N1C->isOpaque() || (C.getBitWidth() <= 64 && 3847 isLegalICmpImmediate(C.getSExtValue())))) { 3848 return DAG.getSetCC(dl, VT, N0, 3849 DAG.getConstant(C, dl, N1.getValueType()), 3850 NewCC); 3851 } 3852 } 3853 } 3854 3855 if (Cond == ISD::SETLT || Cond == ISD::SETULT) { 3856 if (C1 == MinVal) 3857 return DAG.getBoolConstant(false, dl, VT, OpVT); // X < MIN --> false 3858 3859 // TODO: Support this for vectors after legalize ops. 3860 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3861 // Canonicalize setlt X, Max --> setne X, Max 3862 if (C1 == MaxVal) 3863 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3864 3865 // If we have setult X, 1, turn it into seteq X, 0 3866 if (C1 == MinVal+1) 3867 return DAG.getSetCC(dl, VT, N0, 3868 DAG.getConstant(MinVal, dl, N0.getValueType()), 3869 ISD::SETEQ); 3870 } 3871 } 3872 3873 if (Cond == ISD::SETGT || Cond == ISD::SETUGT) { 3874 if (C1 == MaxVal) 3875 return DAG.getBoolConstant(false, dl, VT, OpVT); // X > MAX --> false 3876 3877 // TODO: Support this for vectors after legalize ops. 3878 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3879 // Canonicalize setgt X, Min --> setne X, Min 3880 if (C1 == MinVal) 3881 return DAG.getSetCC(dl, VT, N0, N1, ISD::SETNE); 3882 3883 // If we have setugt X, Max-1, turn it into seteq X, Max 3884 if (C1 == MaxVal-1) 3885 return DAG.getSetCC(dl, VT, N0, 3886 DAG.getConstant(MaxVal, dl, N0.getValueType()), 3887 ISD::SETEQ); 3888 } 3889 } 3890 3891 if (Cond == ISD::SETEQ || Cond == ISD::SETNE) { 3892 // (X & (C l>>/<< Y)) ==/!= 0 --> ((X <</l>> Y) & C) ==/!= 0 3893 if (C1.isNullValue()) 3894 if (SDValue CC = optimizeSetCCByHoistingAndByConstFromLogicalShift( 3895 VT, N0, N1, Cond, DCI, dl)) 3896 return CC; 3897 } 3898 3899 // If we have "setcc X, C0", check to see if we can shrink the immediate 3900 // by changing cc. 3901 // TODO: Support this for vectors after legalize ops. 
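    // The rewrites below trade a large unsigned immediate for 0 or -1,
    // e.g. (setugt X, SINTMAX) becomes (setlt X, 0), which is typically
    // cheaper to materialize.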
3902 if (!VT.isVector() || DCI.isBeforeLegalizeOps()) { 3903 // SETUGT X, SINTMAX -> SETLT X, 0 3904 if (Cond == ISD::SETUGT && 3905 C1 == APInt::getSignedMaxValue(OperandBitSize)) 3906 return DAG.getSetCC(dl, VT, N0, 3907 DAG.getConstant(0, dl, N1.getValueType()), 3908 ISD::SETLT); 3909 3910 // SETULT X, SINTMIN -> SETGT X, -1 3911 if (Cond == ISD::SETULT && 3912 C1 == APInt::getSignedMinValue(OperandBitSize)) { 3913 SDValue ConstMinusOne = 3914 DAG.getConstant(APInt::getAllOnesValue(OperandBitSize), dl, 3915 N1.getValueType()); 3916 return DAG.getSetCC(dl, VT, N0, ConstMinusOne, ISD::SETGT); 3917 } 3918 } 3919 } 3920 3921 // Back to non-vector simplifications. 3922 // TODO: Can we do these for vector splats? 3923 if (auto *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { 3924 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3925 const APInt &C1 = N1C->getAPIntValue(); 3926 EVT ShValTy = N0.getValueType(); 3927 3928 // Fold bit comparisons when we can. 3929 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3930 (VT == ShValTy || (isTypeLegal(VT) && VT.bitsLE(ShValTy))) && 3931 N0.getOpcode() == ISD::AND) { 3932 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3933 EVT ShiftTy = 3934 getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 3935 if (Cond == ISD::SETNE && C1 == 0) {// (X & 8) != 0 --> (X & 8) >> 3 3936 // Perform the xform if the AND RHS is a single bit. 3937 unsigned ShCt = AndRHS->getAPIntValue().logBase2(); 3938 if (AndRHS->getAPIntValue().isPowerOf2() && 3939 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 3940 return DAG.getNode(ISD::TRUNCATE, dl, VT, 3941 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3942 DAG.getConstant(ShCt, dl, ShiftTy))); 3943 } 3944 } else if (Cond == ISD::SETEQ && C1 == AndRHS->getAPIntValue()) { 3945 // (X & 8) == 8 --> (X & 8) >> 3 3946 // Perform the xform if C1 is a single bit. 
3947 unsigned ShCt = C1.logBase2(); 3948 if (C1.isPowerOf2() && 3949 !TLI.shouldAvoidTransformToShift(ShValTy, ShCt)) { 3950 return DAG.getNode(ISD::TRUNCATE, dl, VT, 3951 DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3952 DAG.getConstant(ShCt, dl, ShiftTy))); 3953 } 3954 } 3955 } 3956 } 3957 3958 if (C1.getMinSignedBits() <= 64 && 3959 !isLegalICmpImmediate(C1.getSExtValue())) { 3960 EVT ShiftTy = getShiftAmountTy(ShValTy, Layout, !DCI.isBeforeLegalize()); 3961 // (X & -256) == 256 -> (X >> 8) == 1 3962 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 3963 N0.getOpcode() == ISD::AND && N0.hasOneUse()) { 3964 if (auto *AndRHS = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 3965 const APInt &AndRHSC = AndRHS->getAPIntValue(); 3966 if ((-AndRHSC).isPowerOf2() && (AndRHSC & C1) == C1) { 3967 unsigned ShiftBits = AndRHSC.countTrailingZeros(); 3968 if (!TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 3969 SDValue Shift = 3970 DAG.getNode(ISD::SRL, dl, ShValTy, N0.getOperand(0), 3971 DAG.getConstant(ShiftBits, dl, ShiftTy)); 3972 SDValue CmpRHS = DAG.getConstant(C1.lshr(ShiftBits), dl, ShValTy); 3973 return DAG.getSetCC(dl, VT, Shift, CmpRHS, Cond); 3974 } 3975 } 3976 } 3977 } else if (Cond == ISD::SETULT || Cond == ISD::SETUGE || 3978 Cond == ISD::SETULE || Cond == ISD::SETUGT) { 3979 bool AdjOne = (Cond == ISD::SETULE || Cond == ISD::SETUGT); 3980 // X < 0x100000000 -> (X >> 32) < 1 3981 // X >= 0x100000000 -> (X >> 32) >= 1 3982 // X <= 0x0ffffffff -> (X >> 32) < 1 3983 // X > 0x0ffffffff -> (X >> 32) >= 1 3984 unsigned ShiftBits; 3985 APInt NewC = C1; 3986 ISD::CondCode NewCond = Cond; 3987 if (AdjOne) { 3988 ShiftBits = C1.countTrailingOnes(); 3989 NewC = NewC + 1; 3990 NewCond = (Cond == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE; 3991 } else { 3992 ShiftBits = C1.countTrailingZeros(); 3993 } 3994 NewC.lshrInPlace(ShiftBits); 3995 if (ShiftBits && NewC.getMinSignedBits() <= 64 && 3996 isLegalICmpImmediate(NewC.getSExtValue()) && 3997 !TLI.shouldAvoidTransformToShift(ShValTy, ShiftBits)) { 3998 SDValue Shift = DAG.getNode(ISD::SRL, dl, ShValTy, N0, 3999 DAG.getConstant(ShiftBits, dl, ShiftTy)); 4000 SDValue CmpRHS = DAG.getConstant(NewC, dl, ShValTy); 4001 return DAG.getSetCC(dl, VT, Shift, CmpRHS, NewCond); 4002 } 4003 } 4004 } 4005 } 4006 4007 if (!isa<ConstantFPSDNode>(N0) && isa<ConstantFPSDNode>(N1)) { 4008 auto *CFP = cast<ConstantFPSDNode>(N1); 4009 assert(!CFP->getValueAPF().isNaN() && "Unexpected NaN value"); 4010 4011 // Otherwise, we know the RHS is not a NaN. Simplify the node to drop the 4012 // constant if knowing that the operand is non-nan is enough. We prefer to 4013 // have SETO(x,x) instead of SETO(x, 0.0) because this avoids having to 4014 // materialize 0.0. 4015 if (Cond == ISD::SETO || Cond == ISD::SETUO) 4016 return DAG.getSetCC(dl, VT, N0, N0, Cond); 4017 4018 // setcc (fneg x), C -> setcc swap(pred) x, -C 4019 if (N0.getOpcode() == ISD::FNEG) { 4020 ISD::CondCode SwapCond = ISD::getSetCCSwappedOperands(Cond); 4021 if (DCI.isBeforeLegalizeOps() || 4022 isCondCodeLegal(SwapCond, N0.getSimpleValueType())) { 4023 SDValue NegN1 = DAG.getNode(ISD::FNEG, dl, N0.getValueType(), N1); 4024 return DAG.getSetCC(dl, VT, N0.getOperand(0), NegN1, SwapCond); 4025 } 4026 } 4027 4028 // If the condition is not legal, see if we can find an equivalent one 4029 // which is legal. 
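    // For example, an equality test against +infinity can be lowered as
    // (setoge X, +Inf), and one against -infinity as (setole X, -Inf); the
    // switch below picks the ordered or unordered variant to match the
    // original predicate.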
4030 if (!isCondCodeLegal(Cond, N0.getSimpleValueType())) { 4031 // If the comparison was an awkward floating-point == or != and one of 4032 // the comparison operands is infinity or negative infinity, convert the 4033 // condition to a less-awkward <= or >=. 4034 if (CFP->getValueAPF().isInfinity()) { 4035 bool IsNegInf = CFP->getValueAPF().isNegative(); 4036 ISD::CondCode NewCond = ISD::SETCC_INVALID; 4037 switch (Cond) { 4038 case ISD::SETOEQ: NewCond = IsNegInf ? ISD::SETOLE : ISD::SETOGE; break; 4039 case ISD::SETUEQ: NewCond = IsNegInf ? ISD::SETULE : ISD::SETUGE; break; 4040 case ISD::SETUNE: NewCond = IsNegInf ? ISD::SETUGT : ISD::SETULT; break; 4041 case ISD::SETONE: NewCond = IsNegInf ? ISD::SETOGT : ISD::SETOLT; break; 4042 default: break; 4043 } 4044 if (NewCond != ISD::SETCC_INVALID && 4045 isCondCodeLegal(NewCond, N0.getSimpleValueType())) 4046 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4047 } 4048 } 4049 } 4050 4051 if (N0 == N1) { 4052 // The sext(setcc()) => setcc() optimization relies on the appropriate 4053 // constant being emitted. 4054 assert(!N0.getValueType().isInteger() && 4055 "Integer types should be handled by FoldSetCC"); 4056 4057 bool EqTrue = ISD::isTrueWhenEqual(Cond); 4058 unsigned UOF = ISD::getUnorderedFlavor(Cond); 4059 if (UOF == 2) // FP operators that are undefined on NaNs. 4060 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4061 if (UOF == unsigned(EqTrue)) 4062 return DAG.getBoolConstant(EqTrue, dl, VT, OpVT); 4063 // Otherwise, we can't fold it. However, we can simplify it to SETUO/SETO 4064 // if it is not already. 4065 ISD::CondCode NewCond = UOF == 0 ? ISD::SETO : ISD::SETUO; 4066 if (NewCond != Cond && 4067 (DCI.isBeforeLegalizeOps() || 4068 isCondCodeLegal(NewCond, N0.getSimpleValueType()))) 4069 return DAG.getSetCC(dl, VT, N0, N1, NewCond); 4070 } 4071 4072 if ((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 4073 N0.getValueType().isInteger()) { 4074 if (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::SUB || 4075 N0.getOpcode() == ISD::XOR) { 4076 // Simplify (X+Y) == (X+Z) --> Y == Z 4077 if (N0.getOpcode() == N1.getOpcode()) { 4078 if (N0.getOperand(0) == N1.getOperand(0)) 4079 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(1), Cond); 4080 if (N0.getOperand(1) == N1.getOperand(1)) 4081 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(0), Cond); 4082 if (isCommutativeBinOp(N0.getOpcode())) { 4083 // If X op Y == Y op X, try other combinations. 4084 if (N0.getOperand(0) == N1.getOperand(1)) 4085 return DAG.getSetCC(dl, VT, N0.getOperand(1), N1.getOperand(0), 4086 Cond); 4087 if (N0.getOperand(1) == N1.getOperand(0)) 4088 return DAG.getSetCC(dl, VT, N0.getOperand(0), N1.getOperand(1), 4089 Cond); 4090 } 4091 } 4092 4093 // If RHS is a legal immediate value for a compare instruction, we need 4094 // to be careful about increasing register pressure needlessly. 4095 bool LegalRHSImm = false; 4096 4097 if (auto *RHSC = dyn_cast<ConstantSDNode>(N1)) { 4098 if (auto *LHSR = dyn_cast<ConstantSDNode>(N0.getOperand(1))) { 4099 // Turn (X+C1) == C2 --> X == C2-C1 4100 if (N0.getOpcode() == ISD::ADD && N0.getNode()->hasOneUse()) { 4101 return DAG.getSetCC(dl, VT, N0.getOperand(0), 4102 DAG.getConstant(RHSC->getAPIntValue()- 4103 LHSR->getAPIntValue(), 4104 dl, N0.getValueType()), Cond); 4105 } 4106 4107 // Turn (X^C1) == C2 into X == C1^C2 iff X&~C1 = 0. 4108 if (N0.getOpcode() == ISD::XOR) 4109 // If we know that all of the inverted bits are zero, don't bother 4110 // performing the inversion. 
4111 if (DAG.MaskedValueIsZero(N0.getOperand(0), ~LHSR->getAPIntValue())) 4112 return 4113 DAG.getSetCC(dl, VT, N0.getOperand(0), 4114 DAG.getConstant(LHSR->getAPIntValue() ^ 4115 RHSC->getAPIntValue(), 4116 dl, N0.getValueType()), 4117 Cond); 4118 } 4119 4120 // Turn (C1-X) == C2 --> X == C1-C2 4121 if (auto *SUBC = dyn_cast<ConstantSDNode>(N0.getOperand(0))) { 4122 if (N0.getOpcode() == ISD::SUB && N0.getNode()->hasOneUse()) { 4123 return 4124 DAG.getSetCC(dl, VT, N0.getOperand(1), 4125 DAG.getConstant(SUBC->getAPIntValue() - 4126 RHSC->getAPIntValue(), 4127 dl, N0.getValueType()), 4128 Cond); 4129 } 4130 } 4131 4132 // Could RHSC fold directly into a compare? 4133 if (RHSC->getValueType(0).getSizeInBits() <= 64) 4134 LegalRHSImm = isLegalICmpImmediate(RHSC->getSExtValue()); 4135 } 4136 4137 // (X+Y) == X --> Y == 0 and similar folds. 4138 // Don't do this if X is an immediate that can fold into a cmp 4139 // instruction and X+Y has other uses. It could be an induction variable 4140 // chain, and the transform would increase register pressure. 4141 if (!LegalRHSImm || N0.hasOneUse()) 4142 if (SDValue V = foldSetCCWithBinOp(VT, N0, N1, Cond, dl, DCI)) 4143 return V; 4144 } 4145 4146 if (N1.getOpcode() == ISD::ADD || N1.getOpcode() == ISD::SUB || 4147 N1.getOpcode() == ISD::XOR) 4148 if (SDValue V = foldSetCCWithBinOp(VT, N1, N0, Cond, dl, DCI)) 4149 return V; 4150 4151 if (SDValue V = foldSetCCWithAnd(VT, N0, N1, Cond, dl, DCI)) 4152 return V; 4153 } 4154 4155 // Fold remainder of division by a constant. 4156 if ((N0.getOpcode() == ISD::UREM || N0.getOpcode() == ISD::SREM) && 4157 N0.hasOneUse() && (Cond == ISD::SETEQ || Cond == ISD::SETNE)) { 4158 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4159 4160 // When division is cheap or optimizing for minimum size, 4161 // fall through to DIVREM creation by skipping this fold. 4162 if (!isIntDivCheap(VT, Attr) && !Attr.hasFnAttribute(Attribute::MinSize)) { 4163 if (N0.getOpcode() == ISD::UREM) { 4164 if (SDValue Folded = buildUREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4165 return Folded; 4166 } else if (N0.getOpcode() == ISD::SREM) { 4167 if (SDValue Folded = buildSREMEqFold(VT, N0, N1, Cond, DCI, dl)) 4168 return Folded; 4169 } 4170 } 4171 } 4172 4173 // Fold away ALL boolean setcc's. 
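  // With i1 operands the comparison reduces to plain logic, e.g.
  // X == Y -> ~(X ^ Y), X != Y -> X ^ Y, and X <u Y -> ~X & Y, as in the
  // switch below.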
4174 if (N0.getValueType().getScalarType() == MVT::i1 && foldBooleans) { 4175 SDValue Temp; 4176 switch (Cond) { 4177 default: llvm_unreachable("Unknown integer setcc!"); 4178 case ISD::SETEQ: // X == Y -> ~(X^Y) 4179 Temp = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4180 N0 = DAG.getNOT(dl, Temp, OpVT); 4181 if (!DCI.isCalledByLegalizer()) 4182 DCI.AddToWorklist(Temp.getNode()); 4183 break; 4184 case ISD::SETNE: // X != Y --> (X^Y) 4185 N0 = DAG.getNode(ISD::XOR, dl, OpVT, N0, N1); 4186 break; 4187 case ISD::SETGT: // X >s Y --> X == 0 & Y == 1 --> ~X & Y 4188 case ISD::SETULT: // X <u Y --> X == 0 & Y == 1 --> ~X & Y 4189 Temp = DAG.getNOT(dl, N0, OpVT); 4190 N0 = DAG.getNode(ISD::AND, dl, OpVT, N1, Temp); 4191 if (!DCI.isCalledByLegalizer()) 4192 DCI.AddToWorklist(Temp.getNode()); 4193 break; 4194 case ISD::SETLT: // X <s Y --> X == 1 & Y == 0 --> ~Y & X 4195 case ISD::SETUGT: // X >u Y --> X == 1 & Y == 0 --> ~Y & X 4196 Temp = DAG.getNOT(dl, N1, OpVT); 4197 N0 = DAG.getNode(ISD::AND, dl, OpVT, N0, Temp); 4198 if (!DCI.isCalledByLegalizer()) 4199 DCI.AddToWorklist(Temp.getNode()); 4200 break; 4201 case ISD::SETULE: // X <=u Y --> X == 0 | Y == 1 --> ~X | Y 4202 case ISD::SETGE: // X >=s Y --> X == 0 | Y == 1 --> ~X | Y 4203 Temp = DAG.getNOT(dl, N0, OpVT); 4204 N0 = DAG.getNode(ISD::OR, dl, OpVT, N1, Temp); 4205 if (!DCI.isCalledByLegalizer()) 4206 DCI.AddToWorklist(Temp.getNode()); 4207 break; 4208 case ISD::SETUGE: // X >=u Y --> X == 1 | Y == 0 --> ~Y | X 4209 case ISD::SETLE: // X <=s Y --> X == 1 | Y == 0 --> ~Y | X 4210 Temp = DAG.getNOT(dl, N1, OpVT); 4211 N0 = DAG.getNode(ISD::OR, dl, OpVT, N0, Temp); 4212 break; 4213 } 4214 if (VT.getScalarType() != MVT::i1) { 4215 if (!DCI.isCalledByLegalizer()) 4216 DCI.AddToWorklist(N0.getNode()); 4217 // FIXME: If running after legalize, we probably can't do this. 4218 ISD::NodeType ExtendCode = getExtendForContent(getBooleanContents(OpVT)); 4219 N0 = DAG.getNode(ExtendCode, dl, VT, N0); 4220 } 4221 return N0; 4222 } 4223 4224 // Could not fold it. 4225 return SDValue(); 4226 } 4227 4228 /// Returns true (and the GlobalValue and the offset) if the node is a 4229 /// GlobalAddress + offset. 4230 bool TargetLowering::isGAPlusOffset(SDNode *WN, const GlobalValue *&GA, 4231 int64_t &Offset) const { 4232 4233 SDNode *N = unwrapAddress(SDValue(WN, 0)).getNode(); 4234 4235 if (auto *GASD = dyn_cast<GlobalAddressSDNode>(N)) { 4236 GA = GASD->getGlobal(); 4237 Offset += GASD->getOffset(); 4238 return true; 4239 } 4240 4241 if (N->getOpcode() == ISD::ADD) { 4242 SDValue N1 = N->getOperand(0); 4243 SDValue N2 = N->getOperand(1); 4244 if (isGAPlusOffset(N1.getNode(), GA, Offset)) { 4245 if (auto *V = dyn_cast<ConstantSDNode>(N2)) { 4246 Offset += V->getSExtValue(); 4247 return true; 4248 } 4249 } else if (isGAPlusOffset(N2.getNode(), GA, Offset)) { 4250 if (auto *V = dyn_cast<ConstantSDNode>(N1)) { 4251 Offset += V->getSExtValue(); 4252 return true; 4253 } 4254 } 4255 } 4256 4257 return false; 4258 } 4259 4260 SDValue TargetLowering::PerformDAGCombine(SDNode *N, 4261 DAGCombinerInfo &DCI) const { 4262 // Default implementation: no optimization. 
4263 return SDValue(); 4264 } 4265 4266 //===----------------------------------------------------------------------===// 4267 // Inline Assembler Implementation Methods 4268 //===----------------------------------------------------------------------===// 4269 4270 TargetLowering::ConstraintType 4271 TargetLowering::getConstraintType(StringRef Constraint) const { 4272 unsigned S = Constraint.size(); 4273 4274 if (S == 1) { 4275 switch (Constraint[0]) { 4276 default: break; 4277 case 'r': 4278 return C_RegisterClass; 4279 case 'm': // memory 4280 case 'o': // offsetable 4281 case 'V': // not offsetable 4282 return C_Memory; 4283 case 'n': // Simple Integer 4284 case 'E': // Floating Point Constant 4285 case 'F': // Floating Point Constant 4286 return C_Immediate; 4287 case 'i': // Simple Integer or Relocatable Constant 4288 case 's': // Relocatable Constant 4289 case 'p': // Address. 4290 case 'X': // Allow ANY value. 4291 case 'I': // Target registers. 4292 case 'J': 4293 case 'K': 4294 case 'L': 4295 case 'M': 4296 case 'N': 4297 case 'O': 4298 case 'P': 4299 case '<': 4300 case '>': 4301 return C_Other; 4302 } 4303 } 4304 4305 if (S > 1 && Constraint[0] == '{' && Constraint[S - 1] == '}') { 4306 if (S == 8 && Constraint.substr(1, 6) == "memory") // "{memory}" 4307 return C_Memory; 4308 return C_Register; 4309 } 4310 return C_Unknown; 4311 } 4312 4313 /// Try to replace an X constraint, which matches anything, with another that 4314 /// has more specific requirements based on the type of the corresponding 4315 /// operand. 4316 const char *TargetLowering::LowerXConstraint(EVT ConstraintVT) const { 4317 if (ConstraintVT.isInteger()) 4318 return "r"; 4319 if (ConstraintVT.isFloatingPoint()) 4320 return "f"; // works for many targets 4321 return nullptr; 4322 } 4323 4324 SDValue TargetLowering::LowerAsmOutputForConstraint( 4325 SDValue &Chain, SDValue &Flag, const SDLoc &DL, 4326 const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const { 4327 return SDValue(); 4328 } 4329 4330 /// Lower the specified operand into the Ops vector. 4331 /// If it is invalid, don't add anything to Ops. 4332 void TargetLowering::LowerAsmOperandForConstraint(SDValue Op, 4333 std::string &Constraint, 4334 std::vector<SDValue> &Ops, 4335 SelectionDAG &DAG) const { 4336 4337 if (Constraint.length() > 1) return; 4338 4339 char ConstraintLetter = Constraint[0]; 4340 switch (ConstraintLetter) { 4341 default: break; 4342 case 'X': // Allows any operand; labels (basic block) use this. 4343 if (Op.getOpcode() == ISD::BasicBlock || 4344 Op.getOpcode() == ISD::TargetBlockAddress) { 4345 Ops.push_back(Op); 4346 return; 4347 } 4348 LLVM_FALLTHROUGH; 4349 case 'i': // Simple Integer or Relocatable Constant 4350 case 'n': // Simple Integer 4351 case 's': { // Relocatable Constant 4352 4353 GlobalAddressSDNode *GA; 4354 ConstantSDNode *C; 4355 BlockAddressSDNode *BA; 4356 uint64_t Offset = 0; 4357 4358 // Match (GA) or (C) or (GA+C) or (GA-C) or ((GA+C)+C) or (((GA+C)+C)+C), 4359 // etc., since getelementpointer is variadic. We can't use 4360 // SelectionDAG::FoldSymbolOffset because it expects the GA to be accessible 4361 // while in this case the GA may be furthest from the root node which is 4362 // likely an ISD::ADD. 
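    // The loop below walks down a chain of ADD/SUB nodes with constant
    // operands, accumulating the immediate into Offset, until it reaches a
    // global address, plain constant, or block address node.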
4363     while (1) {
4364       if ((GA = dyn_cast<GlobalAddressSDNode>(Op)) && ConstraintLetter != 'n') {
4365         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4366                                                  GA->getValueType(0),
4367                                                  Offset + GA->getOffset()));
4368         return;
4369       } else if ((C = dyn_cast<ConstantSDNode>(Op)) &&
4370                  ConstraintLetter != 's') {
4371         // gcc prints these as sign extended. Sign extend value to 64 bits
4372         // now; without this it would get ZExt'd later in
4373         // ScheduleDAGSDNodes::EmitNode, which is very generic.
4374         bool IsBool = C->getConstantIntValue()->getBitWidth() == 1;
4375         BooleanContent BCont = getBooleanContents(MVT::i64);
4376         ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
4377                                       : ISD::SIGN_EXTEND;
4378         int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? C->getZExtValue()
4379                                                     : C->getSExtValue();
4380         Ops.push_back(DAG.getTargetConstant(Offset + ExtVal,
4381                                             SDLoc(C), MVT::i64));
4382         return;
4383       } else if ((BA = dyn_cast<BlockAddressSDNode>(Op)) &&
4384                  ConstraintLetter != 'n') {
4385         Ops.push_back(DAG.getTargetBlockAddress(
4386             BA->getBlockAddress(), BA->getValueType(0),
4387             Offset + BA->getOffset(), BA->getTargetFlags()));
4388         return;
4389       } else {
4390         const unsigned OpCode = Op.getOpcode();
4391         if (OpCode == ISD::ADD || OpCode == ISD::SUB) {
4392           if ((C = dyn_cast<ConstantSDNode>(Op.getOperand(0))))
4393             Op = Op.getOperand(1);
4394           // Subtraction is not commutative.
4395           else if (OpCode == ISD::ADD &&
4396                    (C = dyn_cast<ConstantSDNode>(Op.getOperand(1))))
4397             Op = Op.getOperand(0);
4398           else
4399             return;
4400           Offset += (OpCode == ISD::ADD ? 1 : -1) * C->getSExtValue();
4401           continue;
4402         }
4403       }
4404       return;
4405     }
4406     break;
4407   }
4408   }
4409 }
4410 
4411 std::pair<unsigned, const TargetRegisterClass *>
4412 TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *RI,
4413                                              StringRef Constraint,
4414                                              MVT VT) const {
4415   if (Constraint.empty() || Constraint[0] != '{')
4416     return std::make_pair(0u, static_cast<TargetRegisterClass *>(nullptr));
4417   assert(*(Constraint.end() - 1) == '}' && "Not a brace enclosed constraint?");
4418 
4419   // Remove the braces from around the name.
4420   StringRef RegName(Constraint.data() + 1, Constraint.size() - 2);
4421 
4422   std::pair<unsigned, const TargetRegisterClass *> R =
4423       std::make_pair(0u, static_cast<const TargetRegisterClass *>(nullptr));
4424 
4425   // Figure out which register class contains this reg.
4426   for (const TargetRegisterClass *RC : RI->regclasses()) {
4427     // If none of the value types for this register class are valid, we
4428     // can't use it. For example, 64-bit reg classes on 32-bit targets.
4429     if (!isLegalRC(*RI, *RC))
4430       continue;
4431 
4432     for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
4433          I != E; ++I) {
4434       if (RegName.equals_lower(RI->getRegAsmName(*I))) {
4435         std::pair<unsigned, const TargetRegisterClass *> S =
4436             std::make_pair(*I, RC);
4437 
4438         // If this register class has the requested value type, return it,
4439         // otherwise keep searching and return the first class found
4440         // if no other is found which explicitly has the requested type.
4441         if (RI->isTypeLegalForClass(*RC, VT))
4442           return S;
4443         if (!R.second)
4444           R = S;
4445       }
4446     }
4447   }
4448 
4449   return R;
4450 }
4451 
4452 //===----------------------------------------------------------------------===//
4453 // Constraint Selection.
4454 
4455 /// Return true if this is an input operand that is a matching constraint like
4456 /// "4".
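/// For example, given the constraint string "=r,0", the input operand's
/// constraint code is "0", which ties it to output operand 0.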
4457 bool TargetLowering::AsmOperandInfo::isMatchingInputConstraint() const { 4458 assert(!ConstraintCode.empty() && "No known constraint!"); 4459 return isdigit(static_cast<unsigned char>(ConstraintCode[0])); 4460 } 4461 4462 /// If this is an input matching constraint, this method returns the output 4463 /// operand it matches. 4464 unsigned TargetLowering::AsmOperandInfo::getMatchedOperand() const { 4465 assert(!ConstraintCode.empty() && "No known constraint!"); 4466 return atoi(ConstraintCode.c_str()); 4467 } 4468 4469 /// Split up the constraint string from the inline assembly value into the 4470 /// specific constraints and their prefixes, and also tie in the associated 4471 /// operand values. 4472 /// If this returns an empty vector, and if the constraint string itself 4473 /// isn't empty, there was an error parsing. 4474 TargetLowering::AsmOperandInfoVector 4475 TargetLowering::ParseConstraints(const DataLayout &DL, 4476 const TargetRegisterInfo *TRI, 4477 const CallBase &Call) const { 4478 /// Information about all of the constraints. 4479 AsmOperandInfoVector ConstraintOperands; 4480 const InlineAsm *IA = cast<InlineAsm>(Call.getCalledOperand()); 4481 unsigned maCount = 0; // Largest number of multiple alternative constraints. 4482 4483 // Do a prepass over the constraints, canonicalizing them, and building up the 4484 // ConstraintOperands list. 4485 unsigned ArgNo = 0; // ArgNo - The argument of the CallInst. 4486 unsigned ResNo = 0; // ResNo - The result number of the next output. 4487 4488 for (InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) { 4489 ConstraintOperands.emplace_back(std::move(CI)); 4490 AsmOperandInfo &OpInfo = ConstraintOperands.back(); 4491 4492 // Update multiple alternative constraint count. 4493 if (OpInfo.multipleAlternatives.size() > maCount) 4494 maCount = OpInfo.multipleAlternatives.size(); 4495 4496 OpInfo.ConstraintVT = MVT::Other; 4497 4498 // Compute the value type for each operand. 4499 switch (OpInfo.Type) { 4500 case InlineAsm::isOutput: 4501 // Indirect outputs just consume an argument. 4502 if (OpInfo.isIndirect) { 4503 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4504 break; 4505 } 4506 4507 // The return value of the call is this value. As such, there is no 4508 // corresponding argument. 4509 assert(!Call.getType()->isVoidTy() && "Bad inline asm!"); 4510 if (StructType *STy = dyn_cast<StructType>(Call.getType())) { 4511 OpInfo.ConstraintVT = 4512 getSimpleValueType(DL, STy->getElementType(ResNo)); 4513 } else { 4514 assert(ResNo == 0 && "Asm only has one result!"); 4515 OpInfo.ConstraintVT = getSimpleValueType(DL, Call.getType()); 4516 } 4517 ++ResNo; 4518 break; 4519 case InlineAsm::isInput: 4520 OpInfo.CallOperandVal = Call.getArgOperand(ArgNo++); 4521 break; 4522 case InlineAsm::isClobber: 4523 // Nothing to do. 4524 break; 4525 } 4526 4527 if (OpInfo.CallOperandVal) { 4528 llvm::Type *OpTy = OpInfo.CallOperandVal->getType(); 4529 if (OpInfo.isIndirect) { 4530 llvm::PointerType *PtrTy = dyn_cast<PointerType>(OpTy); 4531 if (!PtrTy) 4532 report_fatal_error("Indirect operand for inline asm not a pointer!"); 4533 OpTy = PtrTy->getElementType(); 4534 } 4535 4536 // Look for vector wrapped in a struct. e.g. { <16 x i8> }. 4537 if (StructType *STy = dyn_cast<StructType>(OpTy)) 4538 if (STy->getNumElements() == 1) 4539 OpTy = STy->getElementType(0); 4540 4541 // If OpTy is not a single value, it may be a struct/union that we 4542 // can tile with integers. 
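      // For example, a 16-byte struct is handled as an i128 by the switch
      // below; sizes without a matching integer type leave ConstraintVT as
      // MVT::Other.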
4543 if (!OpTy->isSingleValueType() && OpTy->isSized()) { 4544 unsigned BitSize = DL.getTypeSizeInBits(OpTy); 4545 switch (BitSize) { 4546 default: break; 4547 case 1: 4548 case 8: 4549 case 16: 4550 case 32: 4551 case 64: 4552 case 128: 4553 OpInfo.ConstraintVT = 4554 MVT::getVT(IntegerType::get(OpTy->getContext(), BitSize), true); 4555 break; 4556 } 4557 } else if (PointerType *PT = dyn_cast<PointerType>(OpTy)) { 4558 unsigned PtrSize = DL.getPointerSizeInBits(PT->getAddressSpace()); 4559 OpInfo.ConstraintVT = MVT::getIntegerVT(PtrSize); 4560 } else { 4561 OpInfo.ConstraintVT = MVT::getVT(OpTy, true); 4562 } 4563 } 4564 } 4565 4566 // If we have multiple alternative constraints, select the best alternative. 4567 if (!ConstraintOperands.empty()) { 4568 if (maCount) { 4569 unsigned bestMAIndex = 0; 4570 int bestWeight = -1; 4571 // weight: -1 = invalid match, and 0 = so-so match to 5 = good match. 4572 int weight = -1; 4573 unsigned maIndex; 4574 // Compute the sums of the weights for each alternative, keeping track 4575 // of the best (highest weight) one so far. 4576 for (maIndex = 0; maIndex < maCount; ++maIndex) { 4577 int weightSum = 0; 4578 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4579 cIndex != eIndex; ++cIndex) { 4580 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4581 if (OpInfo.Type == InlineAsm::isClobber) 4582 continue; 4583 4584 // If this is an output operand with a matching input operand, 4585 // look up the matching input. If their types mismatch, e.g. one 4586 // is an integer, the other is floating point, or their sizes are 4587 // different, flag it as an maCantMatch. 4588 if (OpInfo.hasMatchingInput()) { 4589 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4590 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4591 if ((OpInfo.ConstraintVT.isInteger() != 4592 Input.ConstraintVT.isInteger()) || 4593 (OpInfo.ConstraintVT.getSizeInBits() != 4594 Input.ConstraintVT.getSizeInBits())) { 4595 weightSum = -1; // Can't match. 4596 break; 4597 } 4598 } 4599 } 4600 weight = getMultipleConstraintMatchWeight(OpInfo, maIndex); 4601 if (weight == -1) { 4602 weightSum = -1; 4603 break; 4604 } 4605 weightSum += weight; 4606 } 4607 // Update best. 4608 if (weightSum > bestWeight) { 4609 bestWeight = weightSum; 4610 bestMAIndex = maIndex; 4611 } 4612 } 4613 4614 // Now select chosen alternative in each constraint. 4615 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4616 cIndex != eIndex; ++cIndex) { 4617 AsmOperandInfo &cInfo = ConstraintOperands[cIndex]; 4618 if (cInfo.Type == InlineAsm::isClobber) 4619 continue; 4620 cInfo.selectAlternative(bestMAIndex); 4621 } 4622 } 4623 } 4624 4625 // Check and hook up tied operands, choose constraint code to use. 4626 for (unsigned cIndex = 0, eIndex = ConstraintOperands.size(); 4627 cIndex != eIndex; ++cIndex) { 4628 AsmOperandInfo &OpInfo = ConstraintOperands[cIndex]; 4629 4630 // If this is an output operand with a matching input operand, look up the 4631 // matching input. If their types mismatch, e.g. one is an integer, the 4632 // other is floating point, or their sizes are different, flag it as an 4633 // error. 
4634 if (OpInfo.hasMatchingInput()) { 4635 AsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput]; 4636 4637 if (OpInfo.ConstraintVT != Input.ConstraintVT) { 4638 std::pair<unsigned, const TargetRegisterClass *> MatchRC = 4639 getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode, 4640 OpInfo.ConstraintVT); 4641 std::pair<unsigned, const TargetRegisterClass *> InputRC = 4642 getRegForInlineAsmConstraint(TRI, Input.ConstraintCode, 4643 Input.ConstraintVT); 4644 if ((OpInfo.ConstraintVT.isInteger() != 4645 Input.ConstraintVT.isInteger()) || 4646 (MatchRC.second != InputRC.second)) { 4647 report_fatal_error("Unsupported asm: input constraint" 4648 " with a matching output constraint of" 4649 " incompatible type!"); 4650 } 4651 } 4652 } 4653 } 4654 4655 return ConstraintOperands; 4656 } 4657 4658 /// Return an integer indicating how general CT is. 4659 static unsigned getConstraintGenerality(TargetLowering::ConstraintType CT) { 4660 switch (CT) { 4661 case TargetLowering::C_Immediate: 4662 case TargetLowering::C_Other: 4663 case TargetLowering::C_Unknown: 4664 return 0; 4665 case TargetLowering::C_Register: 4666 return 1; 4667 case TargetLowering::C_RegisterClass: 4668 return 2; 4669 case TargetLowering::C_Memory: 4670 return 3; 4671 } 4672 llvm_unreachable("Invalid constraint type"); 4673 } 4674 4675 /// Examine constraint type and operand type and determine a weight value. 4676 /// This object must already have been set up with the operand type 4677 /// and the current alternative constraint selected. 4678 TargetLowering::ConstraintWeight 4679 TargetLowering::getMultipleConstraintMatchWeight( 4680 AsmOperandInfo &info, int maIndex) const { 4681 InlineAsm::ConstraintCodeVector *rCodes; 4682 if (maIndex >= (int)info.multipleAlternatives.size()) 4683 rCodes = &info.Codes; 4684 else 4685 rCodes = &info.multipleAlternatives[maIndex].Codes; 4686 ConstraintWeight BestWeight = CW_Invalid; 4687 4688 // Loop over the options, keeping track of the most general one. 4689 for (unsigned i = 0, e = rCodes->size(); i != e; ++i) { 4690 ConstraintWeight weight = 4691 getSingleConstraintMatchWeight(info, (*rCodes)[i].c_str()); 4692 if (weight > BestWeight) 4693 BestWeight = weight; 4694 } 4695 4696 return BestWeight; 4697 } 4698 4699 /// Examine constraint type and operand type and determine a weight value. 4700 /// This object must already have been set up with the operand type 4701 /// and the current alternative constraint selected. 4702 TargetLowering::ConstraintWeight 4703 TargetLowering::getSingleConstraintMatchWeight( 4704 AsmOperandInfo &info, const char *constraint) const { 4705 ConstraintWeight weight = CW_Invalid; 4706 Value *CallOperandVal = info.CallOperandVal; 4707 // If we don't have a value, we can't do a match, 4708 // but allow it at the lowest weight. 4709 if (!CallOperandVal) 4710 return CW_Default; 4711 // Look at the constraint type. 4712 switch (*constraint) { 4713 case 'i': // immediate integer. 4714 case 'n': // immediate integer with a known value. 4715 if (isa<ConstantInt>(CallOperandVal)) 4716 weight = CW_Constant; 4717 break; 4718 case 's': // non-explicit intregal immediate. 4719 if (isa<GlobalValue>(CallOperandVal)) 4720 weight = CW_Constant; 4721 break; 4722 case 'E': // immediate float if host format. 4723 case 'F': // immediate float. 4724 if (isa<ConstantFP>(CallOperandVal)) 4725 weight = CW_Constant; 4726 break; 4727 case '<': // memory operand with autodecrement. 4728 case '>': // memory operand with autoincrement. 4729 case 'm': // memory operand. 
4730 case 'o': // offsettable memory operand 4731 case 'V': // non-offsettable memory operand 4732 weight = CW_Memory; 4733 break; 4734 case 'r': // general register. 4735 case 'g': // general register, memory operand or immediate integer. 4736 // note: Clang converts "g" to "imr". 4737 if (CallOperandVal->getType()->isIntegerTy()) 4738 weight = CW_Register; 4739 break; 4740 case 'X': // any operand. 4741 default: 4742 weight = CW_Default; 4743 break; 4744 } 4745 return weight; 4746 } 4747 4748 /// If there are multiple different constraints that we could pick for this 4749 /// operand (e.g. "imr") try to pick the 'best' one. 4750 /// This is somewhat tricky: constraints fall into four classes: 4751 /// Other -> immediates and magic values 4752 /// Register -> one specific register 4753 /// RegisterClass -> a group of regs 4754 /// Memory -> memory 4755 /// Ideally, we would pick the most specific constraint possible: if we have 4756 /// something that fits into a register, we would pick it. The problem here 4757 /// is that if we have something that could either be in a register or in 4758 /// memory that use of the register could cause selection of *other* 4759 /// operands to fail: they might only succeed if we pick memory. Because of 4760 /// this the heuristic we use is: 4761 /// 4762 /// 1) If there is an 'other' constraint, and if the operand is valid for 4763 /// that constraint, use it. This makes us take advantage of 'i' 4764 /// constraints when available. 4765 /// 2) Otherwise, pick the most general constraint present. This prefers 4766 /// 'm' over 'r', for example. 4767 /// 4768 static void ChooseConstraint(TargetLowering::AsmOperandInfo &OpInfo, 4769 const TargetLowering &TLI, 4770 SDValue Op, SelectionDAG *DAG) { 4771 assert(OpInfo.Codes.size() > 1 && "Doesn't have multiple constraint options"); 4772 unsigned BestIdx = 0; 4773 TargetLowering::ConstraintType BestType = TargetLowering::C_Unknown; 4774 int BestGenerality = -1; 4775 4776 // Loop over the options, keeping track of the most general one. 4777 for (unsigned i = 0, e = OpInfo.Codes.size(); i != e; ++i) { 4778 TargetLowering::ConstraintType CType = 4779 TLI.getConstraintType(OpInfo.Codes[i]); 4780 4781 // Indirect 'other' or 'immediate' constraints are not allowed. 4782 if (OpInfo.isIndirect && !(CType == TargetLowering::C_Memory || 4783 CType == TargetLowering::C_Register || 4784 CType == TargetLowering::C_RegisterClass)) 4785 continue; 4786 4787 // If this is an 'other' or 'immediate' constraint, see if the operand is 4788 // valid for it. For example, on X86 we might have an 'rI' constraint. If 4789 // the operand is an integer in the range [0..31] we want to use I (saving a 4790 // load of a register), otherwise we must use 'r'. 4791 if ((CType == TargetLowering::C_Other || 4792 CType == TargetLowering::C_Immediate) && Op.getNode()) { 4793 assert(OpInfo.Codes[i].size() == 1 && 4794 "Unhandled multi-letter 'other' constraint"); 4795 std::vector<SDValue> ResultOps; 4796 TLI.LowerAsmOperandForConstraint(Op, OpInfo.Codes[i], 4797 ResultOps, *DAG); 4798 if (!ResultOps.empty()) { 4799 BestType = CType; 4800 BestIdx = i; 4801 break; 4802 } 4803 } 4804 4805 // Things with matching constraints can only be registers, per gcc 4806 // documentation. This mainly affects "g" constraints. 4807 if (CType == TargetLowering::C_Memory && OpInfo.hasMatchingInput()) 4808 continue; 4809 4810 // This constraint letter is more general than the previous one, use it. 
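// (Illustrative: for the "imr" string that clang emits for 'g', an
// immediate operand is already taken by the C_Other/C_Immediate check
// above; otherwise 'm' (C_Memory, generality 3) wins over 'r'
// (C_RegisterClass, generality 2).)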
4811 int Generality = getConstraintGenerality(CType); 4812 if (Generality > BestGenerality) { 4813 BestType = CType; 4814 BestIdx = i; 4815 BestGenerality = Generality; 4816 } 4817 } 4818 4819 OpInfo.ConstraintCode = OpInfo.Codes[BestIdx]; 4820 OpInfo.ConstraintType = BestType; 4821 } 4822 4823 /// Determines the constraint code and constraint type to use for the specific 4824 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType. 4825 void TargetLowering::ComputeConstraintToUse(AsmOperandInfo &OpInfo, 4826 SDValue Op, 4827 SelectionDAG *DAG) const { 4828 assert(!OpInfo.Codes.empty() && "Must have at least one constraint"); 4829 4830 // Single-letter constraints ('r') are very common. 4831 if (OpInfo.Codes.size() == 1) { 4832 OpInfo.ConstraintCode = OpInfo.Codes[0]; 4833 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4834 } else { 4835 ChooseConstraint(OpInfo, *this, Op, DAG); 4836 } 4837 4838 // 'X' matches anything. 4839 if (OpInfo.ConstraintCode == "X" && OpInfo.CallOperandVal) { 4840 // Labels and constants are handled elsewhere ('X' is the only thing 4841 // that matches labels). For Functions, the type here is the type of 4842 // the result, which is not what we want to look at; leave them alone. 4843 Value *v = OpInfo.CallOperandVal; 4844 if (isa<BasicBlock>(v) || isa<ConstantInt>(v) || isa<Function>(v)) { 4845 OpInfo.CallOperandVal = v; 4846 return; 4847 } 4848 4849 if (Op.getNode() && Op.getOpcode() == ISD::TargetBlockAddress) 4850 return; 4851 4852 // Otherwise, try to resolve it to something we know about by looking at 4853 // the actual operand type. 4854 if (const char *Repl = LowerXConstraint(OpInfo.ConstraintVT)) { 4855 OpInfo.ConstraintCode = Repl; 4856 OpInfo.ConstraintType = getConstraintType(OpInfo.ConstraintCode); 4857 } 4858 } 4859 } 4860 4861 /// Given an exact SDIV by a constant, create a multiplication 4862 /// with the multiplicative inverse of the constant. 4863 static SDValue BuildExactSDIV(const TargetLowering &TLI, SDNode *N, 4864 const SDLoc &dl, SelectionDAG &DAG, 4865 SmallVectorImpl<SDNode *> &Created) { 4866 SDValue Op0 = N->getOperand(0); 4867 SDValue Op1 = N->getOperand(1); 4868 EVT VT = N->getValueType(0); 4869 EVT SVT = VT.getScalarType(); 4870 EVT ShVT = TLI.getShiftAmountTy(VT, DAG.getDataLayout()); 4871 EVT ShSVT = ShVT.getScalarType(); 4872 4873 bool UseSRA = false; 4874 SmallVector<SDValue, 16> Shifts, Factors; 4875 4876 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4877 if (C->isNullValue()) 4878 return false; 4879 APInt Divisor = C->getAPIntValue(); 4880 unsigned Shift = Divisor.countTrailingZeros(); 4881 if (Shift) { 4882 Divisor.ashrInPlace(Shift); 4883 UseSRA = true; 4884 } 4885 // Calculate the multiplicative inverse, using Newton's method. 4886 APInt t; 4887 APInt Factor = Divisor; 4888 while ((t = Divisor * Factor) != 1) 4889 Factor *= APInt(Divisor.getBitWidth(), 2) - t; 4890 Shifts.push_back(DAG.getConstant(Shift, dl, ShSVT)); 4891 Factors.push_back(DAG.getConstant(Factor, dl, SVT)); 4892 return true; 4893 }; 4894 4895 // Collect all magic values from the build vector. 4896 if (!ISD::matchUnaryPredicate(Op1, BuildSDIVPattern)) 4897 return SDValue(); 4898 4899 SDValue Shift, Factor; 4900 if (VT.isVector()) { 4901 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4902 Factor = DAG.getBuildVector(VT, dl, Factors); 4903 } else { 4904 Shift = Shifts[0]; 4905 Factor = Factors[0]; 4906 } 4907 4908 SDValue Res = Op0; 4909 4910 // Shift the value upfront if it is even, so the LSB is one. 
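// (Illustrative, i8, exact divide by 6 = 3 * 2^1: 'sra exact' by 1, then
// multiply by 171, the multiplicative inverse of 3 modulo 2^8;
// e.g. 30 /s 6: (30 >> 1) * 171 == 15 * 171 == 2565 == 5 (mod 256).)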
4911 if (UseSRA) { 4912 // TODO: For UDIV use SRL instead of SRA. 4913 SDNodeFlags Flags; 4914 Flags.setExact(true); 4915 Res = DAG.getNode(ISD::SRA, dl, VT, Res, Shift, Flags); 4916 Created.push_back(Res.getNode()); 4917 } 4918 4919 return DAG.getNode(ISD::MUL, dl, VT, Res, Factor); 4920 } 4921 4922 SDValue TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor, 4923 SelectionDAG &DAG, 4924 SmallVectorImpl<SDNode *> &Created) const { 4925 AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes(); 4926 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 4927 if (TLI.isIntDivCheap(N->getValueType(0), Attr)) 4928 return SDValue(N, 0); // Lower SDIV as SDIV 4929 return SDValue(); 4930 } 4931 4932 /// Given an ISD::SDIV node expressing a divide by constant, 4933 /// return a DAG expression to select that will generate the same value by 4934 /// multiplying by a magic number. 4935 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 4936 SDValue TargetLowering::BuildSDIV(SDNode *N, SelectionDAG &DAG, 4937 bool IsAfterLegalization, 4938 SmallVectorImpl<SDNode *> &Created) const { 4939 SDLoc dl(N); 4940 EVT VT = N->getValueType(0); 4941 EVT SVT = VT.getScalarType(); 4942 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 4943 EVT ShSVT = ShVT.getScalarType(); 4944 unsigned EltBits = VT.getScalarSizeInBits(); 4945 4946 // Check to see if we can do this. 4947 // FIXME: We should be more aggressive here. 4948 if (!isTypeLegal(VT)) 4949 return SDValue(); 4950 4951 // If the sdiv has an 'exact' bit we can use a simpler lowering. 4952 if (N->getFlags().hasExact()) 4953 return BuildExactSDIV(*this, N, dl, DAG, Created); 4954 4955 SmallVector<SDValue, 16> MagicFactors, Factors, Shifts, ShiftMasks; 4956 4957 auto BuildSDIVPattern = [&](ConstantSDNode *C) { 4958 if (C->isNullValue()) 4959 return false; 4960 4961 const APInt &Divisor = C->getAPIntValue(); 4962 APInt::ms magics = Divisor.magic(); 4963 int NumeratorFactor = 0; 4964 int ShiftMask = -1; 4965 4966 if (Divisor.isOneValue() || Divisor.isAllOnesValue()) { 4967 // If d is +1/-1, we just multiply the numerator by +1/-1. 4968 NumeratorFactor = Divisor.getSExtValue(); 4969 magics.m = 0; 4970 magics.s = 0; 4971 ShiftMask = 0; 4972 } else if (Divisor.isStrictlyPositive() && magics.m.isNegative()) { 4973 // If d > 0 and m < 0, add the numerator. 4974 NumeratorFactor = 1; 4975 } else if (Divisor.isNegative() && magics.m.isStrictlyPositive()) { 4976 // If d < 0 and m > 0, subtract the numerator. 4977 NumeratorFactor = -1; 4978 } 4979 4980 MagicFactors.push_back(DAG.getConstant(magics.m, dl, SVT)); 4981 Factors.push_back(DAG.getConstant(NumeratorFactor, dl, SVT)); 4982 Shifts.push_back(DAG.getConstant(magics.s, dl, ShSVT)); 4983 ShiftMasks.push_back(DAG.getConstant(ShiftMask, dl, SVT)); 4984 return true; 4985 }; 4986 4987 SDValue N0 = N->getOperand(0); 4988 SDValue N1 = N->getOperand(1); 4989 4990 // Collect the shifts / magic values from each element. 
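// (Illustrative, i32, divisor 3: MagicFactor = 0x55555556, Factor = 0,
// Shift = 0 and ShiftMask = -1; the quotient is mulhs(N0, MagicFactor),
// shifted by 0, plus its own sign bit. E.g. N0 = -8 gives -3 + 1 == -2.)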
4991 if (!ISD::matchUnaryPredicate(N1, BuildSDIVPattern)) 4992 return SDValue(); 4993 4994 SDValue MagicFactor, Factor, Shift, ShiftMask; 4995 if (VT.isVector()) { 4996 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 4997 Factor = DAG.getBuildVector(VT, dl, Factors); 4998 Shift = DAG.getBuildVector(ShVT, dl, Shifts); 4999 ShiftMask = DAG.getBuildVector(VT, dl, ShiftMasks); 5000 } else { 5001 MagicFactor = MagicFactors[0]; 5002 Factor = Factors[0]; 5003 Shift = Shifts[0]; 5004 ShiftMask = ShiftMasks[0]; 5005 } 5006 5007 // Multiply the numerator (operand 0) by the magic value. 5008 // FIXME: We should support doing a MUL in a wider type. 5009 SDValue Q; 5010 if (IsAfterLegalization ? isOperationLegal(ISD::MULHS, VT) 5011 : isOperationLegalOrCustom(ISD::MULHS, VT)) 5012 Q = DAG.getNode(ISD::MULHS, dl, VT, N0, MagicFactor); 5013 else if (IsAfterLegalization ? isOperationLegal(ISD::SMUL_LOHI, VT) 5014 : isOperationLegalOrCustom(ISD::SMUL_LOHI, VT)) { 5015 SDValue LoHi = 5016 DAG.getNode(ISD::SMUL_LOHI, dl, DAG.getVTList(VT, VT), N0, MagicFactor); 5017 Q = SDValue(LoHi.getNode(), 1); 5018 } else 5019 return SDValue(); // No mulhs or equivalent. 5020 Created.push_back(Q.getNode()); 5021 5022 // (Optionally) Add/subtract the numerator using Factor. 5023 Factor = DAG.getNode(ISD::MUL, dl, VT, N0, Factor); 5024 Created.push_back(Factor.getNode()); 5025 Q = DAG.getNode(ISD::ADD, dl, VT, Q, Factor); 5026 Created.push_back(Q.getNode()); 5027 5028 // Shift right algebraic by shift value. 5029 Q = DAG.getNode(ISD::SRA, dl, VT, Q, Shift); 5030 Created.push_back(Q.getNode()); 5031 5032 // Extract the sign bit, mask it and add it to the quotient. 5033 SDValue SignShift = DAG.getConstant(EltBits - 1, dl, ShVT); 5034 SDValue T = DAG.getNode(ISD::SRL, dl, VT, Q, SignShift); 5035 Created.push_back(T.getNode()); 5036 T = DAG.getNode(ISD::AND, dl, VT, T, ShiftMask); 5037 Created.push_back(T.getNode()); 5038 return DAG.getNode(ISD::ADD, dl, VT, Q, T); 5039 } 5040 5041 /// Given an ISD::UDIV node expressing a divide by constant, 5042 /// return a DAG expression to select that will generate the same value by 5043 /// multiplying by a magic number. 5044 /// Ref: "Hacker's Delight" or "The PowerPC Compiler Writer's Guide". 5045 SDValue TargetLowering::BuildUDIV(SDNode *N, SelectionDAG &DAG, 5046 bool IsAfterLegalization, 5047 SmallVectorImpl<SDNode *> &Created) const { 5048 SDLoc dl(N); 5049 EVT VT = N->getValueType(0); 5050 EVT SVT = VT.getScalarType(); 5051 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5052 EVT ShSVT = ShVT.getScalarType(); 5053 unsigned EltBits = VT.getScalarSizeInBits(); 5054 5055 // Check to see if we can do this. 5056 // FIXME: We should be more aggressive here. 5057 if (!isTypeLegal(VT)) 5058 return SDValue(); 5059 5060 bool UseNPQ = false; 5061 SmallVector<SDValue, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 5062 5063 auto BuildUDIVPattern = [&](ConstantSDNode *C) { 5064 if (C->isNullValue()) 5065 return false; 5066 // FIXME: We should use a narrower constant when the upper 5067 // bits are known to be zero. 5068 APInt Divisor = C->getAPIntValue(); 5069 APInt::mu magics = Divisor.magicu(); 5070 unsigned PreShift = 0, PostShift = 0; 5071 5072 // If the divisor is even, we can avoid using the expensive fixup by 5073 // shifting the divided value upfront. 5074 if (magics.a != 0 && !Divisor[0]) { 5075 PreShift = Divisor.countTrailingZeros(); 5076 // Get magic number for the shifted divisor. 
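// (Illustrative, i32, divisor 6: PreShift = 1 and the magic is taken for
// the shifted divisor 3; conceptually N u/ 6 == (N u>> 1) u/ 3, and one
// valid magic for u/ 3 is a mulhu by 0xAAAAAAAB with a post-shift of 1,
// so the expensive NPQ fixup is avoided.)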
5077 magics = Divisor.lshr(PreShift).magicu(PreShift); 5078 assert(magics.a == 0 && "Should use cheap fixup now"); 5079 } 5080 5081 APInt Magic = magics.m; 5082 5083 unsigned SelNPQ; 5084 if (magics.a == 0 || Divisor.isOneValue()) { 5085 assert(magics.s < Divisor.getBitWidth() && 5086 "We shouldn't generate an undefined shift!"); 5087 PostShift = magics.s; 5088 SelNPQ = false; 5089 } else { 5090 PostShift = magics.s - 1; 5091 SelNPQ = true; 5092 } 5093 5094 PreShifts.push_back(DAG.getConstant(PreShift, dl, ShSVT)); 5095 MagicFactors.push_back(DAG.getConstant(Magic, dl, SVT)); 5096 NPQFactors.push_back( 5097 DAG.getConstant(SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 5098 : APInt::getNullValue(EltBits), 5099 dl, SVT)); 5100 PostShifts.push_back(DAG.getConstant(PostShift, dl, ShSVT)); 5101 UseNPQ |= SelNPQ; 5102 return true; 5103 }; 5104 5105 SDValue N0 = N->getOperand(0); 5106 SDValue N1 = N->getOperand(1); 5107 5108 // Collect the shifts/magic values from each element. 5109 if (!ISD::matchUnaryPredicate(N1, BuildUDIVPattern)) 5110 return SDValue(); 5111 5112 SDValue PreShift, PostShift, MagicFactor, NPQFactor; 5113 if (VT.isVector()) { 5114 PreShift = DAG.getBuildVector(ShVT, dl, PreShifts); 5115 MagicFactor = DAG.getBuildVector(VT, dl, MagicFactors); 5116 NPQFactor = DAG.getBuildVector(VT, dl, NPQFactors); 5117 PostShift = DAG.getBuildVector(ShVT, dl, PostShifts); 5118 } else { 5119 PreShift = PreShifts[0]; 5120 MagicFactor = MagicFactors[0]; 5121 PostShift = PostShifts[0]; 5122 } 5123 5124 SDValue Q = N0; 5125 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PreShift); 5126 Created.push_back(Q.getNode()); 5127 5128 // FIXME: We should support doing a MUL in a wider type. 5129 auto GetMULHU = [&](SDValue X, SDValue Y) { 5130 if (IsAfterLegalization ? isOperationLegal(ISD::MULHU, VT) 5131 : isOperationLegalOrCustom(ISD::MULHU, VT)) 5132 return DAG.getNode(ISD::MULHU, dl, VT, X, Y); 5133 if (IsAfterLegalization ? isOperationLegal(ISD::UMUL_LOHI, VT) 5134 : isOperationLegalOrCustom(ISD::UMUL_LOHI, VT)) { 5135 SDValue LoHi = 5136 DAG.getNode(ISD::UMUL_LOHI, dl, DAG.getVTList(VT, VT), X, Y); 5137 return SDValue(LoHi.getNode(), 1); 5138 } 5139 return SDValue(); // No mulhu or equivalent 5140 }; 5141 5142 // Multiply the numerator (operand 0) by the magic value. 5143 Q = GetMULHU(Q, MagicFactor); 5144 if (!Q) 5145 return SDValue(); 5146 5147 Created.push_back(Q.getNode()); 5148 5149 if (UseNPQ) { 5150 SDValue NPQ = DAG.getNode(ISD::SUB, dl, VT, N0, Q); 5151 Created.push_back(NPQ.getNode()); 5152 5153 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 5154 // MULHU to act as a SRL-by-1 for NPQ, else multiply by zero. 5155 if (VT.isVector()) 5156 NPQ = GetMULHU(NPQ, NPQFactor); 5157 else 5158 NPQ = DAG.getNode(ISD::SRL, dl, VT, NPQ, DAG.getConstant(1, dl, ShVT)); 5159 5160 Created.push_back(NPQ.getNode()); 5161 5162 Q = DAG.getNode(ISD::ADD, dl, VT, NPQ, Q); 5163 Created.push_back(Q.getNode()); 5164 } 5165 5166 Q = DAG.getNode(ISD::SRL, dl, VT, Q, PostShift); 5167 Created.push_back(Q.getNode()); 5168 5169 SDValue One = DAG.getConstant(1, dl, VT); 5170 SDValue IsOne = DAG.getSetCC(dl, VT, N1, One, ISD::SETEQ); 5171 return DAG.getSelect(dl, VT, IsOne, N0, Q); 5172 } 5173 5174 /// If all values in Values that *don't* match the predicate are same 'splat' 5175 /// value, then replace all values with that splat value. 5176 /// Else, if AlternativeReplacement was provided, then replace all values that 5177 /// do match predicate with AlternativeReplacement value. 
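/// For example, with Predicate = isNullConstant, {0, 5, 0, 5} becomes
/// {5, 5, 5, 5}; for {0, 5, 0, 7} there is no such splat value, so the
/// zeros are only replaced if an AlternativeReplacement was provided.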
5178 static void 5179 turnVectorIntoSplatVector(MutableArrayRef<SDValue> Values, 5180 std::function<bool(SDValue)> Predicate, 5181 SDValue AlternativeReplacement = SDValue()) { 5182 SDValue Replacement; 5183 // Is there a value for which the Predicate does *NOT* match? What is it? 5184 auto SplatValue = llvm::find_if_not(Values, Predicate); 5185 if (SplatValue != Values.end()) { 5186 // Does Values consist only of SplatValue's and values matching Predicate? 5187 if (llvm::all_of(Values, [Predicate, SplatValue](SDValue Value) { 5188 return Value == *SplatValue || Predicate(Value); 5189 })) // Then we shall replace values matching predicate with SplatValue. 5190 Replacement = *SplatValue; 5191 } 5192 if (!Replacement) { 5193 // Oops, we did not find the "baseline" splat value. 5194 if (!AlternativeReplacement) 5195 return; // Nothing to do. 5196 // Let's replace with provided value then. 5197 Replacement = AlternativeReplacement; 5198 } 5199 std::replace_if(Values.begin(), Values.end(), Predicate, Replacement); 5200 } 5201 5202 /// Given an ISD::UREM used only by an ISD::SETEQ or ISD::SETNE 5203 /// where the divisor is constant and the comparison target is zero, 5204 /// return a DAG expression that will generate the same comparison result 5205 /// using only multiplications, additions and shifts/rotations. 5206 /// Ref: "Hacker's Delight" 10-17. 5207 SDValue TargetLowering::buildUREMEqFold(EVT SETCCVT, SDValue REMNode, 5208 SDValue CompTargetNode, 5209 ISD::CondCode Cond, 5210 DAGCombinerInfo &DCI, 5211 const SDLoc &DL) const { 5212 SmallVector<SDNode *, 5> Built; 5213 if (SDValue Folded = prepareUREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5214 DCI, DL, Built)) { 5215 for (SDNode *N : Built) 5216 DCI.AddToWorklist(N); 5217 return Folded; 5218 } 5219 5220 return SDValue(); 5221 } 5222 5223 SDValue 5224 TargetLowering::prepareUREMEqFold(EVT SETCCVT, SDValue REMNode, 5225 SDValue CompTargetNode, ISD::CondCode Cond, 5226 DAGCombinerInfo &DCI, const SDLoc &DL, 5227 SmallVectorImpl<SDNode *> &Created) const { 5228 // fold (seteq/ne (urem N, D), 0) -> (setule/ugt (rotr (mul N, P), K), Q) 5229 // - D must be constant, with D = D0 * 2^K where D0 is odd 5230 // - P is the multiplicative inverse of D0 modulo 2^W 5231 // - Q = floor(((2^W) - 1) / D) 5232 // where W is the width of the common type of N and D. 5233 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5234 "Only applicable for (in)equality comparisons."); 5235 5236 SelectionDAG &DAG = DCI.DAG; 5237 5238 EVT VT = REMNode.getValueType(); 5239 EVT SVT = VT.getScalarType(); 5240 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5241 EVT ShSVT = ShVT.getScalarType(); 5242 5243 // If MUL is unavailable, we cannot proceed in any case. 5244 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5245 return SDValue(); 5246 5247 bool ComparingWithAllZeros = true; 5248 bool AllComparisonsWithNonZerosAreTautological = true; 5249 bool HadTautologicalLanes = false; 5250 bool AllLanesAreTautological = true; 5251 bool HadEvenDivisor = false; 5252 bool AllDivisorsArePowerOfTwo = true; 5253 bool HadTautologicalInvertedLanes = false; 5254 SmallVector<SDValue, 16> PAmts, KAmts, QAmts, IAmts; 5255 5256 auto BuildUREMPattern = [&](ConstantSDNode *CDiv, ConstantSDNode *CCmp) { 5257 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 
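// For a non-zero divisor, a scalar reference of the check ultimately built
// from this lane's constants (illustrative only, W = 8, D = 6, comparison
// target 0; uint8_t stands in for one 8-bit lane):
//   bool isDivisibleBy6(uint8_t X) {
//     uint8_t R = uint8_t(X * 171);     // P = inv(3, 2^8), since D0 = 3
//     R = uint8_t((R >> 1) | (R << 7)); // rotr by K = 1
//     return R <= 42;                   // Q = floor(255 / 6)
//   }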
5258 if (CDiv->isNullValue())
5259 return false;
5260
5261 const APInt &D = CDiv->getAPIntValue();
5262 const APInt &Cmp = CCmp->getAPIntValue();
5263
5264 ComparingWithAllZeros &= Cmp.isNullValue();
5265
5266 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5267 // if C2 is not less than C1, the comparison is always false.
5268 // But we will only be able to produce the comparison that will give the
5269 // opposite tautological answer. So this lane would need to be fixed up.
5270 bool TautologicalInvertedLane = D.ule(Cmp);
5271 HadTautologicalInvertedLanes |= TautologicalInvertedLane;
5272
5273 // If all lanes are tautological (either all divisors are ones, or divisor
5274 // is not greater than the constant we are comparing with),
5275 // we will prefer to avoid the fold.
5276 bool TautologicalLane = D.isOneValue() || TautologicalInvertedLane;
5277 HadTautologicalLanes |= TautologicalLane;
5278 AllLanesAreTautological &= TautologicalLane;
5279
5280 // If we are comparing with non-zero, we'll need to subtract said
5281 // comparison value from the LHS. But there is no point in doing that if
5282 // every lane where we are comparing with non-zero is tautological.
5283 if (!Cmp.isNullValue())
5284 AllComparisonsWithNonZerosAreTautological &= TautologicalLane;
5285
5286 // Decompose D into D0 * 2^K
5287 unsigned K = D.countTrailingZeros();
5288 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate.");
5289 APInt D0 = D.lshr(K);
5290
5291 // D is even if it has trailing zeros.
5292 HadEvenDivisor |= (K != 0);
5293 // D is a power-of-two if D0 is one.
5294 // If all divisors are power-of-two, we will prefer to avoid the fold.
5295 AllDivisorsArePowerOfTwo &= D0.isOneValue();
5296
5297 // P = inv(D0, 2^W)
5298 // 2^W requires W + 1 bits, so we have to extend and then truncate.
5299 unsigned W = D.getBitWidth();
5300 APInt P = D0.zext(W + 1)
5301 .multiplicativeInverse(APInt::getSignedMinValue(W + 1))
5302 .trunc(W);
5303 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable
5304 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check.");
5305
5306 // Q = floor((2^W - 1) u/ D)
5307 // R = ((2^W - 1) u% D)
5308 APInt Q, R;
5309 APInt::udivrem(APInt::getAllOnesValue(W), D, Q, R);
5310
5311 // If we are comparing with zero, then that comparison constant is okay;
5312 // otherwise it may need to be one less than that.
5313 if (Cmp.ugt(R))
5314 Q -= 1;
5315
5316 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) &&
5317 "We are expecting that K is always less than all-ones for ShSVT");
5318
5319 // If the lane is tautological the result can be constant-folded.
5320 if (TautologicalLane) {
5321 // Set the P and K amounts to bogus values so we can try to splat them.
5322 P = 0;
5323 K = -1;
5324 // And ensure that the comparison constant is tautological:
5325 // it will always compare true/false.
5326 Q = -1;
5327 }
5328
5329 PAmts.push_back(DAG.getConstant(P, DL, SVT));
5330 KAmts.push_back(
5331 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT));
5332 QAmts.push_back(DAG.getConstant(Q, DL, SVT));
5333 return true;
5334 };
5335
5336 SDValue N = REMNode.getOperand(0);
5337 SDValue D = REMNode.getOperand(1);
5338
5339 // Collect the values from each element.
5340 if (!ISD::matchBinaryPredicate(D, CompTargetNode, BuildUREMPattern))
5341 return SDValue();
5342
5343 // If all lanes are tautological, the result can be constant-folded.
5344 if (AllLanesAreTautological)
5345 return SDValue();
5346
5347 // If this is a urem by a power-of-two, avoid the fold since it can be
5348 // best implemented as a bit test.
5349 if (AllDivisorsArePowerOfTwo)
5350 return SDValue();
5351
5352 SDValue PVal, KVal, QVal;
5353 if (VT.isVector()) {
5354 if (HadTautologicalLanes) {
5355 // Try to turn PAmts into a splat, since we don't care about the values
5356 // that are currently '0'. If we can't, just keep '0's.
5357 turnVectorIntoSplatVector(PAmts, isNullConstant);
5358 // Try to turn KAmts into a splat, since we don't care about the values
5359 // that are currently '-1'. If we can't, change them to '0's.
5360 turnVectorIntoSplatVector(KAmts, isAllOnesConstant,
5361 DAG.getConstant(0, DL, ShSVT));
5362 }
5363
5364 PVal = DAG.getBuildVector(VT, DL, PAmts);
5365 KVal = DAG.getBuildVector(ShVT, DL, KAmts);
5366 QVal = DAG.getBuildVector(VT, DL, QAmts);
5367 } else {
5368 PVal = PAmts[0];
5369 KVal = KAmts[0];
5370 QVal = QAmts[0];
5371 }
5372
5373 if (!ComparingWithAllZeros && !AllComparisonsWithNonZerosAreTautological) {
5374 if (!isOperationLegalOrCustom(ISD::SUB, VT))
5375 return SDValue(); // FIXME: Could/should use `ISD::ADD`?
5376 assert(CompTargetNode.getValueType() == N.getValueType() &&
5377 "Expecting that the types on LHS and RHS of comparisons match.");
5378 N = DAG.getNode(ISD::SUB, DL, VT, N, CompTargetNode);
5379 }
5380
5381 // (mul N, P)
5382 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal);
5383 Created.push_back(Op0.getNode());
5384
5385 // Rotate right only if any divisor was even. We avoid rotates for all-odd
5386 // divisors as a performance improvement, since rotating by 0 is a no-op.
5387 if (HadEvenDivisor) {
5388 // We need ROTR to do this.
5389 if (!isOperationLegalOrCustom(ISD::ROTR, VT))
5390 return SDValue();
5391 SDNodeFlags Flags;
5392 Flags.setExact(true);
5393 // UREM: (rotr (mul N, P), K)
5394 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags);
5395 Created.push_back(Op0.getNode());
5396 }
5397
5398 // UREM: (setule/setugt (rotr (mul N, P), K), Q)
5399 SDValue NewCC =
5400 DAG.getSetCC(DL, SETCCVT, Op0, QVal,
5401 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT));
5402 if (!HadTautologicalInvertedLanes)
5403 return NewCC;
5404
5405 // If any lanes previously compared always-false, the NewCC will give an
5406 // always-true result for them, so we need to fix up those lanes.
5407 // Or the other way around for the inequality predicate.
5408 assert(VT.isVector() && "Can/should only get here for vectors.");
5409 Created.push_back(NewCC.getNode());
5410
5411 // `x u% C1` is *always* less than C1. So given `x u% C1 == C2`,
5412 // if C2 is not less than C1, the comparison is always false.
5413 // But we have produced the comparison that will give the
5414 // opposite tautological answer. So these lanes would need to be fixed up.
5415 SDValue TautologicalInvertedChannels =
5416 DAG.getSetCC(DL, SETCCVT, D, CompTargetNode, ISD::SETULE);
5417 Created.push_back(TautologicalInvertedChannels.getNode());
5418
5419 if (isOperationLegalOrCustom(ISD::VSELECT, SETCCVT)) {
5420 // If we have a vector select, let's replace the comparison results in the
5421 // affected lanes with the correct tautological result.
5422 SDValue Replacement = DAG.getBoolConstant(Cond == ISD::SETEQ ? false : true,
5423 DL, SETCCVT, SETCCVT);
5424 return DAG.getNode(ISD::VSELECT, DL, SETCCVT, TautologicalInvertedChannels,
5425 Replacement, NewCC);
5426 }
5427
5428 // Else, we can just invert the comparison result in the appropriate lanes.
5429 if (isOperationLegalOrCustom(ISD::XOR, SETCCVT)) 5430 return DAG.getNode(ISD::XOR, DL, SETCCVT, NewCC, 5431 TautologicalInvertedChannels); 5432 5433 return SDValue(); // Don't know how to lower. 5434 } 5435 5436 /// Given an ISD::SREM used only by an ISD::SETEQ or ISD::SETNE 5437 /// where the divisor is constant and the comparison target is zero, 5438 /// return a DAG expression that will generate the same comparison result 5439 /// using only multiplications, additions and shifts/rotations. 5440 /// Ref: "Hacker's Delight" 10-17. 5441 SDValue TargetLowering::buildSREMEqFold(EVT SETCCVT, SDValue REMNode, 5442 SDValue CompTargetNode, 5443 ISD::CondCode Cond, 5444 DAGCombinerInfo &DCI, 5445 const SDLoc &DL) const { 5446 SmallVector<SDNode *, 7> Built; 5447 if (SDValue Folded = prepareSREMEqFold(SETCCVT, REMNode, CompTargetNode, Cond, 5448 DCI, DL, Built)) { 5449 assert(Built.size() <= 7 && "Max size prediction failed."); 5450 for (SDNode *N : Built) 5451 DCI.AddToWorklist(N); 5452 return Folded; 5453 } 5454 5455 return SDValue(); 5456 } 5457 5458 SDValue 5459 TargetLowering::prepareSREMEqFold(EVT SETCCVT, SDValue REMNode, 5460 SDValue CompTargetNode, ISD::CondCode Cond, 5461 DAGCombinerInfo &DCI, const SDLoc &DL, 5462 SmallVectorImpl<SDNode *> &Created) const { 5463 // Fold: 5464 // (seteq/ne (srem N, D), 0) 5465 // To: 5466 // (setule/ugt (rotr (add (mul N, P), A), K), Q) 5467 // 5468 // - D must be constant, with D = D0 * 2^K where D0 is odd 5469 // - P is the multiplicative inverse of D0 modulo 2^W 5470 // - A = bitwiseand(floor((2^(W - 1) - 1) / D0), (-(2^k))) 5471 // - Q = floor((2 * A) / (2^K)) 5472 // where W is the width of the common type of N and D. 5473 assert((Cond == ISD::SETEQ || Cond == ISD::SETNE) && 5474 "Only applicable for (in)equality comparisons."); 5475 5476 SelectionDAG &DAG = DCI.DAG; 5477 5478 EVT VT = REMNode.getValueType(); 5479 EVT SVT = VT.getScalarType(); 5480 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 5481 EVT ShSVT = ShVT.getScalarType(); 5482 5483 // If MUL is unavailable, we cannot proceed in any case. 5484 if (!isOperationLegalOrCustom(ISD::MUL, VT)) 5485 return SDValue(); 5486 5487 // TODO: Could support comparing with non-zero too. 5488 ConstantSDNode *CompTarget = isConstOrConstSplat(CompTargetNode); 5489 if (!CompTarget || !CompTarget->isNullValue()) 5490 return SDValue(); 5491 5492 bool HadIntMinDivisor = false; 5493 bool HadOneDivisor = false; 5494 bool AllDivisorsAreOnes = true; 5495 bool HadEvenDivisor = false; 5496 bool NeedToApplyOffset = false; 5497 bool AllDivisorsArePowerOfTwo = true; 5498 SmallVector<SDValue, 16> PAmts, AAmts, KAmts, QAmts; 5499 5500 auto BuildSREMPattern = [&](ConstantSDNode *C) { 5501 // Division by 0 is UB. Leave it to be constant-folded elsewhere. 5502 if (C->isNullValue()) 5503 return false; 5504 5505 // FIXME: we don't fold `rem %X, -C` to `rem %X, C` in DAGCombine. 5506 5507 // WARNING: this fold is only valid for positive divisors! 5508 APInt D = C->getAPIntValue(); 5509 if (D.isNegative()) 5510 D.negate(); // `rem %X, -C` is equivalent to `rem %X, C` 5511 5512 HadIntMinDivisor |= D.isMinSignedValue(); 5513 5514 // If all divisors are ones, we will prefer to avoid the fold. 
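// (For a divisor that does need the fold, e.g. W = 8, D = 6: D0 = 3, K = 1,
// P = 171, A = 42 and Q = 42, so the lane check becomes
// rotr(x * 171 + 42, 1) u<= 42 (mod 256), which holds exactly when
// x s% 6 == 0. Illustrative only.)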
5515 HadOneDivisor |= D.isOneValue(); 5516 AllDivisorsAreOnes &= D.isOneValue(); 5517 5518 // Decompose D into D0 * 2^K 5519 unsigned K = D.countTrailingZeros(); 5520 assert((!D.isOneValue() || (K == 0)) && "For divisor '1' we won't rotate."); 5521 APInt D0 = D.lshr(K); 5522 5523 if (!D.isMinSignedValue()) { 5524 // D is even if it has trailing zeros; unless it's INT_MIN, in which case 5525 // we don't care about this lane in this fold, we'll special-handle it. 5526 HadEvenDivisor |= (K != 0); 5527 } 5528 5529 // D is a power-of-two if D0 is one. This includes INT_MIN. 5530 // If all divisors are power-of-two, we will prefer to avoid the fold. 5531 AllDivisorsArePowerOfTwo &= D0.isOneValue(); 5532 5533 // P = inv(D0, 2^W) 5534 // 2^W requires W + 1 bits, so we have to extend and then truncate. 5535 unsigned W = D.getBitWidth(); 5536 APInt P = D0.zext(W + 1) 5537 .multiplicativeInverse(APInt::getSignedMinValue(W + 1)) 5538 .trunc(W); 5539 assert(!P.isNullValue() && "No multiplicative inverse!"); // unreachable 5540 assert((D0 * P).isOneValue() && "Multiplicative inverse sanity check."); 5541 5542 // A = floor((2^(W - 1) - 1) / D0) & -2^K 5543 APInt A = APInt::getSignedMaxValue(W).udiv(D0); 5544 A.clearLowBits(K); 5545 5546 if (!D.isMinSignedValue()) { 5547 // If divisor INT_MIN, then we don't care about this lane in this fold, 5548 // we'll special-handle it. 5549 NeedToApplyOffset |= A != 0; 5550 } 5551 5552 // Q = floor((2 * A) / (2^K)) 5553 APInt Q = (2 * A).udiv(APInt::getOneBitSet(W, K)); 5554 5555 assert(APInt::getAllOnesValue(SVT.getSizeInBits()).ugt(A) && 5556 "We are expecting that A is always less than all-ones for SVT"); 5557 assert(APInt::getAllOnesValue(ShSVT.getSizeInBits()).ugt(K) && 5558 "We are expecting that K is always less than all-ones for ShSVT"); 5559 5560 // If the divisor is 1 the result can be constant-folded. Likewise, we 5561 // don't care about INT_MIN lanes, those can be set to undef if appropriate. 5562 if (D.isOneValue()) { 5563 // Set P, A and K to a bogus values so we can try to splat them. 5564 P = 0; 5565 A = -1; 5566 K = -1; 5567 5568 // x ?% 1 == 0 <--> true <--> x u<= -1 5569 Q = -1; 5570 } 5571 5572 PAmts.push_back(DAG.getConstant(P, DL, SVT)); 5573 AAmts.push_back(DAG.getConstant(A, DL, SVT)); 5574 KAmts.push_back( 5575 DAG.getConstant(APInt(ShSVT.getSizeInBits(), K), DL, ShSVT)); 5576 QAmts.push_back(DAG.getConstant(Q, DL, SVT)); 5577 return true; 5578 }; 5579 5580 SDValue N = REMNode.getOperand(0); 5581 SDValue D = REMNode.getOperand(1); 5582 5583 // Collect the values from each element. 5584 if (!ISD::matchUnaryPredicate(D, BuildSREMPattern)) 5585 return SDValue(); 5586 5587 // If this is a srem by a one, avoid the fold since it can be constant-folded. 5588 if (AllDivisorsAreOnes) 5589 return SDValue(); 5590 5591 // If this is a srem by a powers-of-two (including INT_MIN), avoid the fold 5592 // since it can be best implemented as a bit test. 5593 if (AllDivisorsArePowerOfTwo) 5594 return SDValue(); 5595 5596 SDValue PVal, AVal, KVal, QVal; 5597 if (VT.isVector()) { 5598 if (HadOneDivisor) { 5599 // Try to turn PAmts into a splat, since we don't care about the values 5600 // that are currently '0'. If we can't, just keep '0'`s. 5601 turnVectorIntoSplatVector(PAmts, isNullConstant); 5602 // Try to turn AAmts into a splat, since we don't care about the 5603 // values that are currently '-1'. If we can't, change them to '0'`s. 
5604 turnVectorIntoSplatVector(AAmts, isAllOnesConstant, 5605 DAG.getConstant(0, DL, SVT)); 5606 // Try to turn KAmts into a splat, since we don't care about the values 5607 // that are currently '-1'. If we can't, change them to '0'`s. 5608 turnVectorIntoSplatVector(KAmts, isAllOnesConstant, 5609 DAG.getConstant(0, DL, ShSVT)); 5610 } 5611 5612 PVal = DAG.getBuildVector(VT, DL, PAmts); 5613 AVal = DAG.getBuildVector(VT, DL, AAmts); 5614 KVal = DAG.getBuildVector(ShVT, DL, KAmts); 5615 QVal = DAG.getBuildVector(VT, DL, QAmts); 5616 } else { 5617 PVal = PAmts[0]; 5618 AVal = AAmts[0]; 5619 KVal = KAmts[0]; 5620 QVal = QAmts[0]; 5621 } 5622 5623 // (mul N, P) 5624 SDValue Op0 = DAG.getNode(ISD::MUL, DL, VT, N, PVal); 5625 Created.push_back(Op0.getNode()); 5626 5627 if (NeedToApplyOffset) { 5628 // We need ADD to do this. 5629 if (!isOperationLegalOrCustom(ISD::ADD, VT)) 5630 return SDValue(); 5631 5632 // (add (mul N, P), A) 5633 Op0 = DAG.getNode(ISD::ADD, DL, VT, Op0, AVal); 5634 Created.push_back(Op0.getNode()); 5635 } 5636 5637 // Rotate right only if any divisor was even. We avoid rotates for all-odd 5638 // divisors as a performance improvement, since rotating by 0 is a no-op. 5639 if (HadEvenDivisor) { 5640 // We need ROTR to do this. 5641 if (!isOperationLegalOrCustom(ISD::ROTR, VT)) 5642 return SDValue(); 5643 SDNodeFlags Flags; 5644 Flags.setExact(true); 5645 // SREM: (rotr (add (mul N, P), A), K) 5646 Op0 = DAG.getNode(ISD::ROTR, DL, VT, Op0, KVal, Flags); 5647 Created.push_back(Op0.getNode()); 5648 } 5649 5650 // SREM: (setule/setugt (rotr (add (mul N, P), A), K), Q) 5651 SDValue Fold = 5652 DAG.getSetCC(DL, SETCCVT, Op0, QVal, 5653 ((Cond == ISD::SETEQ) ? ISD::SETULE : ISD::SETUGT)); 5654 5655 // If we didn't have lanes with INT_MIN divisor, then we're done. 5656 if (!HadIntMinDivisor) 5657 return Fold; 5658 5659 // That fold is only valid for positive divisors. Which effectively means, 5660 // it is invalid for INT_MIN divisors. So if we have such a lane, 5661 // we must fix-up results for said lanes. 5662 assert(VT.isVector() && "Can/should only get here for vectors."); 5663 5664 if (!isOperationLegalOrCustom(ISD::SETEQ, VT) || 5665 !isOperationLegalOrCustom(ISD::AND, VT) || 5666 !isOperationLegalOrCustom(Cond, VT) || 5667 !isOperationLegalOrCustom(ISD::VSELECT, VT)) 5668 return SDValue(); 5669 5670 Created.push_back(Fold.getNode()); 5671 5672 SDValue IntMin = DAG.getConstant( 5673 APInt::getSignedMinValue(SVT.getScalarSizeInBits()), DL, VT); 5674 SDValue IntMax = DAG.getConstant( 5675 APInt::getSignedMaxValue(SVT.getScalarSizeInBits()), DL, VT); 5676 SDValue Zero = 5677 DAG.getConstant(APInt::getNullValue(SVT.getScalarSizeInBits()), DL, VT); 5678 5679 // Which lanes had INT_MIN divisors? Divisor is constant, so const-folded. 5680 SDValue DivisorIsIntMin = DAG.getSetCC(DL, SETCCVT, D, IntMin, ISD::SETEQ); 5681 Created.push_back(DivisorIsIntMin.getNode()); 5682 5683 // (N s% INT_MIN) ==/!= 0 <--> (N & INT_MAX) ==/!= 0 5684 SDValue Masked = DAG.getNode(ISD::AND, DL, VT, N, IntMax); 5685 Created.push_back(Masked.getNode()); 5686 SDValue MaskedIsZero = DAG.getSetCC(DL, SETCCVT, Masked, Zero, Cond); 5687 Created.push_back(MaskedIsZero.getNode()); 5688 5689 // To produce final result we need to blend 2 vectors: 'SetCC' and 5690 // 'MaskedIsZero'. If the divisor for channel was *NOT* INT_MIN, we pick 5691 // from 'Fold', else pick from 'MaskedIsZero'. Since 'DivisorIsIntMin' is 5692 // constant-folded, select can get lowered to a shuffle with constant mask. 
5693 SDValue Blended = 5694 DAG.getNode(ISD::VSELECT, DL, VT, DivisorIsIntMin, MaskedIsZero, Fold); 5695 5696 return Blended; 5697 } 5698 5699 bool TargetLowering:: 5700 verifyReturnAddressArgumentIsConstant(SDValue Op, SelectionDAG &DAG) const { 5701 if (!isa<ConstantSDNode>(Op.getOperand(0))) { 5702 DAG.getContext()->emitError("argument to '__builtin_return_address' must " 5703 "be a constant integer"); 5704 return true; 5705 } 5706 5707 return false; 5708 } 5709 5710 SDValue TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG, 5711 bool LegalOps, bool OptForSize, 5712 NegatibleCost &Cost, 5713 unsigned Depth) const { 5714 // fneg is removable even if it has multiple uses. 5715 if (Op.getOpcode() == ISD::FNEG) { 5716 Cost = NegatibleCost::Cheaper; 5717 return Op.getOperand(0); 5718 } 5719 5720 // Don't recurse exponentially. 5721 if (Depth > SelectionDAG::MaxRecursionDepth) 5722 return SDValue(); 5723 5724 // Pre-increment recursion depth for use in recursive calls. 5725 ++Depth; 5726 const SDNodeFlags Flags = Op->getFlags(); 5727 const TargetOptions &Options = DAG.getTarget().Options; 5728 EVT VT = Op.getValueType(); 5729 unsigned Opcode = Op.getOpcode(); 5730 5731 // Don't allow anything with multiple uses unless we know it is free. 5732 if (!Op.hasOneUse() && Opcode != ISD::ConstantFP) { 5733 bool IsFreeExtend = Opcode == ISD::FP_EXTEND && 5734 isFPExtFree(VT, Op.getOperand(0).getValueType()); 5735 if (!IsFreeExtend) 5736 return SDValue(); 5737 } 5738 5739 SDLoc DL(Op); 5740 5741 switch (Opcode) { 5742 case ISD::ConstantFP: { 5743 // Don't invert constant FP values after legalization unless the target says 5744 // the negated constant is legal. 5745 bool IsOpLegal = 5746 isOperationLegal(ISD::ConstantFP, VT) || 5747 isFPImmLegal(neg(cast<ConstantFPSDNode>(Op)->getValueAPF()), VT, 5748 OptForSize); 5749 5750 if (LegalOps && !IsOpLegal) 5751 break; 5752 5753 APFloat V = cast<ConstantFPSDNode>(Op)->getValueAPF(); 5754 V.changeSign(); 5755 SDValue CFP = DAG.getConstantFP(V, DL, VT); 5756 5757 // If we already have the use of the negated floating constant, it is free 5758 // to negate it even it has multiple uses. 5759 if (!Op.hasOneUse() && CFP.use_empty()) 5760 break; 5761 Cost = NegatibleCost::Neutral; 5762 return CFP; 5763 } 5764 case ISD::BUILD_VECTOR: { 5765 // Only permit BUILD_VECTOR of constants. 5766 if (llvm::any_of(Op->op_values(), [&](SDValue N) { 5767 return !N.isUndef() && !isa<ConstantFPSDNode>(N); 5768 })) 5769 break; 5770 5771 bool IsOpLegal = 5772 (isOperationLegal(ISD::ConstantFP, VT) && 5773 isOperationLegal(ISD::BUILD_VECTOR, VT)) || 5774 llvm::all_of(Op->op_values(), [&](SDValue N) { 5775 return N.isUndef() || 5776 isFPImmLegal(neg(cast<ConstantFPSDNode>(N)->getValueAPF()), VT, 5777 OptForSize); 5778 }); 5779 5780 if (LegalOps && !IsOpLegal) 5781 break; 5782 5783 SmallVector<SDValue, 4> Ops; 5784 for (SDValue C : Op->op_values()) { 5785 if (C.isUndef()) { 5786 Ops.push_back(C); 5787 continue; 5788 } 5789 APFloat V = cast<ConstantFPSDNode>(C)->getValueAPF(); 5790 V.changeSign(); 5791 Ops.push_back(DAG.getConstantFP(V, DL, C.getValueType())); 5792 } 5793 Cost = NegatibleCost::Neutral; 5794 return DAG.getBuildVector(VT, DL, Ops); 5795 } 5796 case ISD::FADD: { 5797 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5798 break; 5799 5800 // After operation legalization, it might not be legal to create new FSUBs. 
5801 if (LegalOps && !isOperationLegalOrCustom(ISD::FSUB, VT)) 5802 break; 5803 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5804 5805 // fold (fneg (fadd X, Y)) -> (fsub (fneg X), Y) 5806 NegatibleCost CostX = NegatibleCost::Expensive; 5807 SDValue NegX = 5808 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5809 // fold (fneg (fadd X, Y)) -> (fsub (fneg Y), X) 5810 NegatibleCost CostY = NegatibleCost::Expensive; 5811 SDValue NegY = 5812 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5813 5814 // Negate the X if its cost is less or equal than Y. 5815 if (NegX && (CostX <= CostY)) { 5816 Cost = CostX; 5817 return DAG.getNode(ISD::FSUB, DL, VT, NegX, Y, Flags); 5818 } 5819 5820 // Negate the Y if it is not expensive. 5821 if (NegY) { 5822 Cost = CostY; 5823 return DAG.getNode(ISD::FSUB, DL, VT, NegY, X, Flags); 5824 } 5825 break; 5826 } 5827 case ISD::FSUB: { 5828 // We can't turn -(A-B) into B-A when we honor signed zeros. 5829 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5830 break; 5831 5832 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5833 // fold (fneg (fsub 0, Y)) -> Y 5834 if (ConstantFPSDNode *C = isConstOrConstSplatFP(X, /*AllowUndefs*/ true)) 5835 if (C->isZero()) { 5836 Cost = NegatibleCost::Cheaper; 5837 return Y; 5838 } 5839 5840 // fold (fneg (fsub X, Y)) -> (fsub Y, X) 5841 Cost = NegatibleCost::Neutral; 5842 return DAG.getNode(ISD::FSUB, DL, VT, Y, X, Flags); 5843 } 5844 case ISD::FMUL: 5845 case ISD::FDIV: { 5846 SDValue X = Op.getOperand(0), Y = Op.getOperand(1); 5847 5848 // fold (fneg (fmul X, Y)) -> (fmul (fneg X), Y) 5849 NegatibleCost CostX = NegatibleCost::Expensive; 5850 SDValue NegX = 5851 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5852 // fold (fneg (fmul X, Y)) -> (fmul X, (fneg Y)) 5853 NegatibleCost CostY = NegatibleCost::Expensive; 5854 SDValue NegY = 5855 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5856 5857 // Negate the X if its cost is less or equal than Y. 5858 if (NegX && (CostX <= CostY)) { 5859 Cost = CostX; 5860 return DAG.getNode(Opcode, DL, VT, NegX, Y, Flags); 5861 } 5862 5863 // Ignore X * 2.0 because that is expected to be canonicalized to X + X. 5864 if (auto *C = isConstOrConstSplatFP(Op.getOperand(1))) 5865 if (C->isExactlyValue(2.0) && Op.getOpcode() == ISD::FMUL) 5866 break; 5867 5868 // Negate the Y if it is not expensive. 5869 if (NegY) { 5870 Cost = CostY; 5871 return DAG.getNode(Opcode, DL, VT, X, NegY, Flags); 5872 } 5873 break; 5874 } 5875 case ISD::FMA: 5876 case ISD::FMAD: { 5877 if (!Options.NoSignedZerosFPMath && !Flags.hasNoSignedZeros()) 5878 break; 5879 5880 SDValue X = Op.getOperand(0), Y = Op.getOperand(1), Z = Op.getOperand(2); 5881 NegatibleCost CostZ = NegatibleCost::Expensive; 5882 SDValue NegZ = 5883 getNegatedExpression(Z, DAG, LegalOps, OptForSize, CostZ, Depth); 5884 // Give up if fail to negate the Z. 5885 if (!NegZ) 5886 break; 5887 5888 // fold (fneg (fma X, Y, Z)) -> (fma (fneg X), Y, (fneg Z)) 5889 NegatibleCost CostX = NegatibleCost::Expensive; 5890 SDValue NegX = 5891 getNegatedExpression(X, DAG, LegalOps, OptForSize, CostX, Depth); 5892 // fold (fneg (fma X, Y, Z)) -> (fma X, (fneg Y), (fneg Z)) 5893 NegatibleCost CostY = NegatibleCost::Expensive; 5894 SDValue NegY = 5895 getNegatedExpression(Y, DAG, LegalOps, OptForSize, CostY, Depth); 5896 5897 // Negate the X if its cost is less or equal than Y. 
5898 if (NegX && (CostX <= CostY)) { 5899 Cost = std::min(CostX, CostZ); 5900 return DAG.getNode(Opcode, DL, VT, NegX, Y, NegZ, Flags); 5901 } 5902 5903 // Negate the Y if it is not expensive. 5904 if (NegY) { 5905 Cost = std::min(CostY, CostZ); 5906 return DAG.getNode(Opcode, DL, VT, X, NegY, NegZ, Flags); 5907 } 5908 break; 5909 } 5910 5911 case ISD::FP_EXTEND: 5912 case ISD::FSIN: 5913 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 5914 OptForSize, Cost, Depth)) 5915 return DAG.getNode(Opcode, DL, VT, NegV); 5916 break; 5917 case ISD::FP_ROUND: 5918 if (SDValue NegV = getNegatedExpression(Op.getOperand(0), DAG, LegalOps, 5919 OptForSize, Cost, Depth)) 5920 return DAG.getNode(ISD::FP_ROUND, DL, VT, NegV, Op.getOperand(1)); 5921 break; 5922 } 5923 5924 return SDValue(); 5925 } 5926 5927 //===----------------------------------------------------------------------===// 5928 // Legalization Utilities 5929 //===----------------------------------------------------------------------===// 5930 5931 bool TargetLowering::expandMUL_LOHI(unsigned Opcode, EVT VT, const SDLoc &dl, 5932 SDValue LHS, SDValue RHS, 5933 SmallVectorImpl<SDValue> &Result, 5934 EVT HiLoVT, SelectionDAG &DAG, 5935 MulExpansionKind Kind, SDValue LL, 5936 SDValue LH, SDValue RL, SDValue RH) const { 5937 assert(Opcode == ISD::MUL || Opcode == ISD::UMUL_LOHI || 5938 Opcode == ISD::SMUL_LOHI); 5939 5940 bool HasMULHS = (Kind == MulExpansionKind::Always) || 5941 isOperationLegalOrCustom(ISD::MULHS, HiLoVT); 5942 bool HasMULHU = (Kind == MulExpansionKind::Always) || 5943 isOperationLegalOrCustom(ISD::MULHU, HiLoVT); 5944 bool HasSMUL_LOHI = (Kind == MulExpansionKind::Always) || 5945 isOperationLegalOrCustom(ISD::SMUL_LOHI, HiLoVT); 5946 bool HasUMUL_LOHI = (Kind == MulExpansionKind::Always) || 5947 isOperationLegalOrCustom(ISD::UMUL_LOHI, HiLoVT); 5948 5949 if (!HasMULHU && !HasMULHS && !HasUMUL_LOHI && !HasSMUL_LOHI) 5950 return false; 5951 5952 unsigned OuterBitSize = VT.getScalarSizeInBits(); 5953 unsigned InnerBitSize = HiLoVT.getScalarSizeInBits(); 5954 unsigned LHSSB = DAG.ComputeNumSignBits(LHS); 5955 unsigned RHSSB = DAG.ComputeNumSignBits(RHS); 5956 5957 // LL, LH, RL, and RH must be either all NULL or all set to a value. 5958 assert((LL.getNode() && LH.getNode() && RL.getNode() && RH.getNode()) || 5959 (!LL.getNode() && !LH.getNode() && !RL.getNode() && !RH.getNode())); 5960 5961 SDVTList VTs = DAG.getVTList(HiLoVT, HiLoVT); 5962 auto MakeMUL_LOHI = [&](SDValue L, SDValue R, SDValue &Lo, SDValue &Hi, 5963 bool Signed) -> bool { 5964 if ((Signed && HasSMUL_LOHI) || (!Signed && HasUMUL_LOHI)) { 5965 Lo = DAG.getNode(Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI, dl, VTs, L, R); 5966 Hi = SDValue(Lo.getNode(), 1); 5967 return true; 5968 } 5969 if ((Signed && HasMULHS) || (!Signed && HasMULHU)) { 5970 Lo = DAG.getNode(ISD::MUL, dl, HiLoVT, L, R); 5971 Hi = DAG.getNode(Signed ? ISD::MULHS : ISD::MULHU, dl, HiLoVT, L, R); 5972 return true; 5973 } 5974 return false; 5975 }; 5976 5977 SDValue Lo, Hi; 5978 5979 if (!LL.getNode() && !RL.getNode() && 5980 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 5981 LL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LHS); 5982 RL = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RHS); 5983 } 5984 5985 if (!LL.getNode()) 5986 return false; 5987 5988 APInt HighMask = APInt::getHighBitsSet(OuterBitSize, InnerBitSize); 5989 if (DAG.MaskedValueIsZero(LHS, HighMask) && 5990 DAG.MaskedValueIsZero(RHS, HighMask)) { 5991 // The inputs are both zero-extended. 
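// In general LHS * RHS == LL * RL + ((LL * RH + LH * RL) << InnerBitSize)
// + ((LH * RH) << (2 * InnerBitSize)); with both high halves known to be
// zero only the LL * RL term survives, so one half-width multiply suffices.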
5992 if (MakeMUL_LOHI(LL, RL, Lo, Hi, false)) { 5993 Result.push_back(Lo); 5994 Result.push_back(Hi); 5995 if (Opcode != ISD::MUL) { 5996 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 5997 Result.push_back(Zero); 5998 Result.push_back(Zero); 5999 } 6000 return true; 6001 } 6002 } 6003 6004 if (!VT.isVector() && Opcode == ISD::MUL && LHSSB > InnerBitSize && 6005 RHSSB > InnerBitSize) { 6006 // The input values are both sign-extended. 6007 // TODO non-MUL case? 6008 if (MakeMUL_LOHI(LL, RL, Lo, Hi, true)) { 6009 Result.push_back(Lo); 6010 Result.push_back(Hi); 6011 return true; 6012 } 6013 } 6014 6015 unsigned ShiftAmount = OuterBitSize - InnerBitSize; 6016 EVT ShiftAmountTy = getShiftAmountTy(VT, DAG.getDataLayout()); 6017 if (APInt::getMaxValue(ShiftAmountTy.getSizeInBits()).ult(ShiftAmount)) { 6018 // FIXME getShiftAmountTy does not always return a sensible result when VT 6019 // is an illegal type, and so the type may be too small to fit the shift 6020 // amount. Override it with i32. The shift will have to be legalized. 6021 ShiftAmountTy = MVT::i32; 6022 } 6023 SDValue Shift = DAG.getConstant(ShiftAmount, dl, ShiftAmountTy); 6024 6025 if (!LH.getNode() && !RH.getNode() && 6026 isOperationLegalOrCustom(ISD::SRL, VT) && 6027 isOperationLegalOrCustom(ISD::TRUNCATE, HiLoVT)) { 6028 LH = DAG.getNode(ISD::SRL, dl, VT, LHS, Shift); 6029 LH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, LH); 6030 RH = DAG.getNode(ISD::SRL, dl, VT, RHS, Shift); 6031 RH = DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, RH); 6032 } 6033 6034 if (!LH.getNode()) 6035 return false; 6036 6037 if (!MakeMUL_LOHI(LL, RL, Lo, Hi, false)) 6038 return false; 6039 6040 Result.push_back(Lo); 6041 6042 if (Opcode == ISD::MUL) { 6043 RH = DAG.getNode(ISD::MUL, dl, HiLoVT, LL, RH); 6044 LH = DAG.getNode(ISD::MUL, dl, HiLoVT, LH, RL); 6045 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, RH); 6046 Hi = DAG.getNode(ISD::ADD, dl, HiLoVT, Hi, LH); 6047 Result.push_back(Hi); 6048 return true; 6049 } 6050 6051 // Compute the full width result. 6052 auto Merge = [&](SDValue Lo, SDValue Hi) -> SDValue { 6053 Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Lo); 6054 Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6055 Hi = DAG.getNode(ISD::SHL, dl, VT, Hi, Shift); 6056 return DAG.getNode(ISD::OR, dl, VT, Lo, Hi); 6057 }; 6058 6059 SDValue Next = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Hi); 6060 if (!MakeMUL_LOHI(LL, RH, Lo, Hi, false)) 6061 return false; 6062 6063 // This is effectively the add part of a multiply-add of half-sized operands, 6064 // so it cannot overflow. 
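// (Assuming HiLoVT is half the width of VT: the high half of LL * RL is at
// most 2^InnerBitSize - 1 and LL * RH is at most (2^InnerBitSize - 1)^2, so
// their sum is at most 2^(2 * InnerBitSize) - 2^InnerBitSize and fits.)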
6065 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6066 6067 if (!MakeMUL_LOHI(LH, RL, Lo, Hi, false)) 6068 return false; 6069 6070 SDValue Zero = DAG.getConstant(0, dl, HiLoVT); 6071 EVT BoolType = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6072 6073 bool UseGlue = (isOperationLegalOrCustom(ISD::ADDC, VT) && 6074 isOperationLegalOrCustom(ISD::ADDE, VT)); 6075 if (UseGlue) 6076 Next = DAG.getNode(ISD::ADDC, dl, DAG.getVTList(VT, MVT::Glue), Next, 6077 Merge(Lo, Hi)); 6078 else 6079 Next = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(VT, BoolType), Next, 6080 Merge(Lo, Hi), DAG.getConstant(0, dl, BoolType)); 6081 6082 SDValue Carry = Next.getValue(1); 6083 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6084 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6085 6086 if (!MakeMUL_LOHI(LH, RH, Lo, Hi, Opcode == ISD::SMUL_LOHI)) 6087 return false; 6088 6089 if (UseGlue) 6090 Hi = DAG.getNode(ISD::ADDE, dl, DAG.getVTList(HiLoVT, MVT::Glue), Hi, Zero, 6091 Carry); 6092 else 6093 Hi = DAG.getNode(ISD::ADDCARRY, dl, DAG.getVTList(HiLoVT, BoolType), Hi, 6094 Zero, Carry); 6095 6096 Next = DAG.getNode(ISD::ADD, dl, VT, Next, Merge(Lo, Hi)); 6097 6098 if (Opcode == ISD::SMUL_LOHI) { 6099 SDValue NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6100 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, RL)); 6101 Next = DAG.getSelectCC(dl, LH, Zero, NextSub, Next, ISD::SETLT); 6102 6103 NextSub = DAG.getNode(ISD::SUB, dl, VT, Next, 6104 DAG.getNode(ISD::ZERO_EXTEND, dl, VT, LL)); 6105 Next = DAG.getSelectCC(dl, RH, Zero, NextSub, Next, ISD::SETLT); 6106 } 6107 6108 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6109 Next = DAG.getNode(ISD::SRL, dl, VT, Next, Shift); 6110 Result.push_back(DAG.getNode(ISD::TRUNCATE, dl, HiLoVT, Next)); 6111 return true; 6112 } 6113 6114 bool TargetLowering::expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT, 6115 SelectionDAG &DAG, MulExpansionKind Kind, 6116 SDValue LL, SDValue LH, SDValue RL, 6117 SDValue RH) const { 6118 SmallVector<SDValue, 2> Result; 6119 bool Ok = expandMUL_LOHI(N->getOpcode(), N->getValueType(0), SDLoc(N), 6120 N->getOperand(0), N->getOperand(1), Result, HiLoVT, 6121 DAG, Kind, LL, LH, RL, RH); 6122 if (Ok) { 6123 assert(Result.size() == 2); 6124 Lo = Result[0]; 6125 Hi = Result[1]; 6126 } 6127 return Ok; 6128 } 6129 6130 // Check that (every element of) Z is undef or not an exact multiple of BW. 
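// For example, with BW = 32, shift amounts such as 1 or 33 (33 % 32 == 1)
// qualify, while 0 or 64 do not.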
6131 static bool isNonZeroModBitWidth(SDValue Z, unsigned BW) { 6132 return ISD::matchUnaryPredicate( 6133 Z, 6134 [=](ConstantSDNode *C) { return !C || C->getAPIntValue().urem(BW) != 0; }, 6135 true); 6136 } 6137 6138 bool TargetLowering::expandFunnelShift(SDNode *Node, SDValue &Result, 6139 SelectionDAG &DAG) const { 6140 EVT VT = Node->getValueType(0); 6141 6142 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6143 !isOperationLegalOrCustom(ISD::SRL, VT) || 6144 !isOperationLegalOrCustom(ISD::SUB, VT) || 6145 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6146 return false; 6147 6148 SDValue X = Node->getOperand(0); 6149 SDValue Y = Node->getOperand(1); 6150 SDValue Z = Node->getOperand(2); 6151 6152 unsigned BW = VT.getScalarSizeInBits(); 6153 bool IsFSHL = Node->getOpcode() == ISD::FSHL; 6154 SDLoc DL(SDValue(Node, 0)); 6155 6156 EVT ShVT = Z.getValueType(); 6157 6158 SDValue ShX, ShY; 6159 SDValue ShAmt, InvShAmt; 6160 if (isNonZeroModBitWidth(Z, BW)) { 6161 // fshl: X << C | Y >> (BW - C) 6162 // fshr: X << (BW - C) | Y >> C 6163 // where C = Z % BW is not zero 6164 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6165 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6166 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, BitWidthC, ShAmt); 6167 ShX = DAG.getNode(ISD::SHL, DL, VT, X, IsFSHL ? ShAmt : InvShAmt); 6168 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, IsFSHL ? InvShAmt : ShAmt); 6169 } else { 6170 // fshl: X << (Z % BW) | Y >> 1 >> (BW - 1 - (Z % BW)) 6171 // fshr: X << 1 << (BW - 1 - (Z % BW)) | Y >> (Z % BW) 6172 SDValue Mask = DAG.getConstant(BW - 1, DL, ShVT); 6173 if (isPowerOf2_32(BW)) { 6174 // Z % BW -> Z & (BW - 1) 6175 ShAmt = DAG.getNode(ISD::AND, DL, ShVT, Z, Mask); 6176 // (BW - 1) - (Z % BW) -> ~Z & (BW - 1) 6177 InvShAmt = DAG.getNode(ISD::AND, DL, ShVT, DAG.getNOT(DL, Z, ShVT), Mask); 6178 } else { 6179 SDValue BitWidthC = DAG.getConstant(BW, DL, ShVT); 6180 ShAmt = DAG.getNode(ISD::UREM, DL, ShVT, Z, BitWidthC); 6181 InvShAmt = DAG.getNode(ISD::SUB, DL, ShVT, Mask, ShAmt); 6182 } 6183 6184 SDValue One = DAG.getConstant(1, DL, ShVT); 6185 if (IsFSHL) { 6186 ShX = DAG.getNode(ISD::SHL, DL, VT, X, ShAmt); 6187 SDValue ShY1 = DAG.getNode(ISD::SRL, DL, VT, Y, One); 6188 ShY = DAG.getNode(ISD::SRL, DL, VT, ShY1, InvShAmt); 6189 } else { 6190 SDValue ShX1 = DAG.getNode(ISD::SHL, DL, VT, X, One); 6191 ShX = DAG.getNode(ISD::SHL, DL, VT, ShX1, InvShAmt); 6192 ShY = DAG.getNode(ISD::SRL, DL, VT, Y, ShAmt); 6193 } 6194 } 6195 Result = DAG.getNode(ISD::OR, DL, VT, ShX, ShY); 6196 return true; 6197 } 6198 6199 // TODO: Merge with expandFunnelShift. 6200 bool TargetLowering::expandROT(SDNode *Node, SDValue &Result, 6201 SelectionDAG &DAG) const { 6202 EVT VT = Node->getValueType(0); 6203 unsigned EltSizeInBits = VT.getScalarSizeInBits(); 6204 bool IsLeft = Node->getOpcode() == ISD::ROTL; 6205 SDValue Op0 = Node->getOperand(0); 6206 SDValue Op1 = Node->getOperand(1); 6207 SDLoc DL(SDValue(Node, 0)); 6208 6209 EVT ShVT = Op1.getValueType(); 6210 SDValue Zero = DAG.getConstant(0, DL, ShVT); 6211 6212 assert(isPowerOf2_32(EltSizeInBits) && EltSizeInBits > 1 && 6213 "Expecting the type bitwidth to be a power of 2"); 6214 6215 // If a rotate in the other direction is supported, use it. 6216 unsigned RevRot = IsLeft ? 
ISD::ROTR : ISD::ROTL; 6217 if (isOperationLegalOrCustom(RevRot, VT)) { 6218 SDValue Sub = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6219 Result = DAG.getNode(RevRot, DL, VT, Op0, Sub); 6220 return true; 6221 } 6222 6223 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SHL, VT) || 6224 !isOperationLegalOrCustom(ISD::SRL, VT) || 6225 !isOperationLegalOrCustom(ISD::SUB, VT) || 6226 !isOperationLegalOrCustomOrPromote(ISD::OR, VT) || 6227 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6228 return false; 6229 6230 // Otherwise, 6231 // (rotl x, c) -> (or (shl x, (and c, w-1)), (srl x, (and -c, w-1))) 6232 // (rotr x, c) -> (or (srl x, (and c, w-1)), (shl x, (and -c, w-1))) 6233 // 6234 unsigned ShOpc = IsLeft ? ISD::SHL : ISD::SRL; 6235 unsigned HsOpc = IsLeft ? ISD::SRL : ISD::SHL; 6236 SDValue BitWidthMinusOneC = DAG.getConstant(EltSizeInBits - 1, DL, ShVT); 6237 SDValue NegOp1 = DAG.getNode(ISD::SUB, DL, ShVT, Zero, Op1); 6238 SDValue And0 = DAG.getNode(ISD::AND, DL, ShVT, Op1, BitWidthMinusOneC); 6239 SDValue And1 = DAG.getNode(ISD::AND, DL, ShVT, NegOp1, BitWidthMinusOneC); 6240 Result = DAG.getNode(ISD::OR, DL, VT, DAG.getNode(ShOpc, DL, VT, Op0, And0), 6241 DAG.getNode(HsOpc, DL, VT, Op0, And1)); 6242 return true; 6243 } 6244 6245 bool TargetLowering::expandFP_TO_SINT(SDNode *Node, SDValue &Result, 6246 SelectionDAG &DAG) const { 6247 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6248 SDValue Src = Node->getOperand(OpNo); 6249 EVT SrcVT = Src.getValueType(); 6250 EVT DstVT = Node->getValueType(0); 6251 SDLoc dl(SDValue(Node, 0)); 6252 6253 // FIXME: Only f32 to i64 conversions are supported. 6254 if (SrcVT != MVT::f32 || DstVT != MVT::i64) 6255 return false; 6256 6257 if (Node->isStrictFPOpcode()) 6258 // When a NaN is converted to an integer a trap is allowed. We can't 6259 // use this expansion here because it would eliminate that trap. Other 6260 // traps are also allowed and cannot be eliminated. See 6261 // IEEE 754-2008 sec 5.8. 
6262 return false;
6263
6264 // Expand f32 -> i64 conversion
6265 // This algorithm comes from compiler-rt's implementation of fixsfdi:
6266 // https://github.com/llvm/llvm-project/blob/master/compiler-rt/lib/builtins/fixsfdi.c
6267 unsigned SrcEltBits = SrcVT.getScalarSizeInBits();
6268 EVT IntVT = SrcVT.changeTypeToInteger();
6269 EVT IntShVT = getShiftAmountTy(IntVT, DAG.getDataLayout());
6270
6271 SDValue ExponentMask = DAG.getConstant(0x7F800000, dl, IntVT);
6272 SDValue ExponentLoBit = DAG.getConstant(23, dl, IntVT);
6273 SDValue Bias = DAG.getConstant(127, dl, IntVT);
6274 SDValue SignMask = DAG.getConstant(APInt::getSignMask(SrcEltBits), dl, IntVT);
6275 SDValue SignLowBit = DAG.getConstant(SrcEltBits - 1, dl, IntVT);
6276 SDValue MantissaMask = DAG.getConstant(0x007FFFFF, dl, IntVT);
6277
6278 SDValue Bits = DAG.getNode(ISD::BITCAST, dl, IntVT, Src);
6279
6280 SDValue ExponentBits = DAG.getNode(
6281 ISD::SRL, dl, IntVT, DAG.getNode(ISD::AND, dl, IntVT, Bits, ExponentMask),
6282 DAG.getZExtOrTrunc(ExponentLoBit, dl, IntShVT));
6283 SDValue Exponent = DAG.getNode(ISD::SUB, dl, IntVT, ExponentBits, Bias);
6284
6285 SDValue Sign = DAG.getNode(ISD::SRA, dl, IntVT,
6286 DAG.getNode(ISD::AND, dl, IntVT, Bits, SignMask),
6287 DAG.getZExtOrTrunc(SignLowBit, dl, IntShVT));
6288 Sign = DAG.getSExtOrTrunc(Sign, dl, DstVT);
6289
6290 SDValue R = DAG.getNode(ISD::OR, dl, IntVT,
6291 DAG.getNode(ISD::AND, dl, IntVT, Bits, MantissaMask),
6292 DAG.getConstant(0x00800000, dl, IntVT));
6293
6294 R = DAG.getZExtOrTrunc(R, dl, DstVT);
6295
6296 R = DAG.getSelectCC(
6297 dl, Exponent, ExponentLoBit,
6298 DAG.getNode(ISD::SHL, dl, DstVT, R,
6299 DAG.getZExtOrTrunc(
6300 DAG.getNode(ISD::SUB, dl, IntVT, Exponent, ExponentLoBit),
6301 dl, IntShVT)),
6302 DAG.getNode(ISD::SRL, dl, DstVT, R,
6303 DAG.getZExtOrTrunc(
6304 DAG.getNode(ISD::SUB, dl, IntVT, ExponentLoBit, Exponent),
6305 dl, IntShVT)),
6306 ISD::SETGT);
6307
6308 SDValue Ret = DAG.getNode(ISD::SUB, dl, DstVT,
6309 DAG.getNode(ISD::XOR, dl, DstVT, R, Sign), Sign);
6310
6311 Result = DAG.getSelectCC(dl, Exponent, DAG.getConstant(0, dl, IntVT),
6312 DAG.getConstant(0, dl, DstVT), Ret, ISD::SETLT);
6313 return true;
6314 }
6315
6316 bool TargetLowering::expandFP_TO_UINT(SDNode *Node, SDValue &Result,
6317 SDValue &Chain,
6318 SelectionDAG &DAG) const {
6319 SDLoc dl(SDValue(Node, 0));
6320 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0;
6321 SDValue Src = Node->getOperand(OpNo);
6322
6323 EVT SrcVT = Src.getValueType();
6324 EVT DstVT = Node->getValueType(0);
6325 EVT SetCCVT =
6326 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
6327 EVT DstSetCCVT =
6328 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
6329
6330 // Only expand vector types if we have the appropriate vector bit operations.
6331 unsigned SIntOpcode = Node->isStrictFPOpcode() ? ISD::STRICT_FP_TO_SINT :
6332 ISD::FP_TO_SINT;
6333 if (DstVT.isVector() && (!isOperationLegalOrCustom(SIntOpcode, DstVT) ||
6334 !isOperationLegalOrCustomOrPromote(ISD::XOR, SrcVT)))
6335 return false;
6336
6337 // If the maximum float value is smaller than the signed integer range,
6338 // the destination signmask can't be represented by the float, so we can
6339 // just use FP_TO_SINT directly.
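  // For example (illustrative): for an f16 source and an i64 destination the
  // largest finite f16 (65504.0) is far below 2^63, so FP_TO_SINT already
  // covers every in-range input and no unsigned adjustment is needed.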
6340 const fltSemantics &APFSem = DAG.EVTToAPFloatSemantics(SrcVT); 6341 APFloat APF(APFSem, APInt::getNullValue(SrcVT.getScalarSizeInBits())); 6342 APInt SignMask = APInt::getSignMask(DstVT.getScalarSizeInBits()); 6343 if (APFloat::opOverflow & 6344 APF.convertFromAPInt(SignMask, false, APFloat::rmNearestTiesToEven)) { 6345 if (Node->isStrictFPOpcode()) { 6346 Result = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6347 { Node->getOperand(0), Src }); 6348 Chain = Result.getValue(1); 6349 } else 6350 Result = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6351 return true; 6352 } 6353 6354 SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT); 6355 SDValue Sel; 6356 6357 if (Node->isStrictFPOpcode()) { 6358 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT, 6359 Node->getOperand(0), /*IsSignaling*/ true); 6360 Chain = Sel.getValue(1); 6361 } else { 6362 Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT); 6363 } 6364 6365 bool Strict = Node->isStrictFPOpcode() || 6366 shouldUseStrictFP_TO_INT(SrcVT, DstVT, /*IsSigned*/ false); 6367 6368 if (Strict) { 6369 // Expand based on maximum range of FP_TO_SINT, if the value exceeds the 6370 // signmask then offset (the result of which should be fully representable). 6371 // Sel = Src < 0x8000000000000000 6372 // FltOfs = select Sel, 0, 0x8000000000000000 6373 // IntOfs = select Sel, 0, 0x8000000000000000 6374 // Result = fp_to_sint(Src - FltOfs) ^ IntOfs 6375 6376 // TODO: Should any fast-math-flags be set for the FSUB? 6377 SDValue FltOfs = DAG.getSelect(dl, SrcVT, Sel, 6378 DAG.getConstantFP(0.0, dl, SrcVT), Cst); 6379 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6380 SDValue IntOfs = DAG.getSelect(dl, DstVT, Sel, 6381 DAG.getConstant(0, dl, DstVT), 6382 DAG.getConstant(SignMask, dl, DstVT)); 6383 SDValue SInt; 6384 if (Node->isStrictFPOpcode()) { 6385 SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl, { SrcVT, MVT::Other }, 6386 { Chain, Src, FltOfs }); 6387 SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { DstVT, MVT::Other }, 6388 { Val.getValue(1), Val }); 6389 Chain = SInt.getValue(1); 6390 } else { 6391 SDValue Val = DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FltOfs); 6392 SInt = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Val); 6393 } 6394 Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs); 6395 } else { 6396 // Expand based on maximum range of FP_TO_SINT: 6397 // True = fp_to_sint(Src) 6398 // False = 0x8000000000000000 + fp_to_sint(Src - 0x8000000000000000) 6399 // Result = select (Src < 0x8000000000000000), True, False 6400 6401 SDValue True = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, Src); 6402 // TODO: Should any fast-math-flags be set for the FSUB? 6403 SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, DstVT, 6404 DAG.getNode(ISD::FSUB, dl, SrcVT, Src, Cst)); 6405 False = DAG.getNode(ISD::XOR, dl, DstVT, False, 6406 DAG.getConstant(SignMask, dl, DstVT)); 6407 Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT); 6408 Result = DAG.getSelect(dl, DstVT, Sel, True, False); 6409 } 6410 return true; 6411 } 6412 6413 bool TargetLowering::expandUINT_TO_FP(SDNode *Node, SDValue &Result, 6414 SDValue &Chain, 6415 SelectionDAG &DAG) const { 6416 unsigned OpNo = Node->isStrictFPOpcode() ? 1 : 0; 6417 SDValue Src = Node->getOperand(OpNo); 6418 EVT SrcVT = Src.getValueType(); 6419 EVT DstVT = Node->getValueType(0); 6420 6421 if (SrcVT.getScalarType() != MVT::i64 || DstVT.getScalarType() != MVT::f64) 6422 return false; 6423 6424 // Only expand vector types if we have the appropriate vector bit operations. 
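  // A sketch of the scalar algorithm used below (see the __floatundidf
  // reference further down): the i64 is split into 32-bit halves Lo and Hi,
  // the bit patterns of 2^52 and 2^84 are OR'ed in to form the doubles
  // (2^52 + Lo) and (2^84 + Hi*2^32), and the result is computed as
  //   ((2^84 + Hi*2^32) - (2^84 + 2^52)) + (2^52 + Lo) == Hi*2^32 + Lo,
  // so the only rounding happens in the final addition.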
6425 if (SrcVT.isVector() && (!isOperationLegalOrCustom(ISD::SRL, SrcVT) || 6426 !isOperationLegalOrCustom(ISD::FADD, DstVT) || 6427 !isOperationLegalOrCustom(ISD::FSUB, DstVT) || 6428 !isOperationLegalOrCustomOrPromote(ISD::OR, SrcVT) || 6429 !isOperationLegalOrCustomOrPromote(ISD::AND, SrcVT))) 6430 return false; 6431 6432 SDLoc dl(SDValue(Node, 0)); 6433 EVT ShiftVT = getShiftAmountTy(SrcVT, DAG.getDataLayout()); 6434 6435 // Implementation of unsigned i64 to f64 following the algorithm in 6436 // __floatundidf in compiler_rt. This implementation has the advantage 6437 // of performing rounding correctly, both in the default rounding mode 6438 // and in all alternate rounding modes. 6439 SDValue TwoP52 = DAG.getConstant(UINT64_C(0x4330000000000000), dl, SrcVT); 6440 SDValue TwoP84PlusTwoP52 = DAG.getConstantFP( 6441 BitsToDouble(UINT64_C(0x4530000000100000)), dl, DstVT); 6442 SDValue TwoP84 = DAG.getConstant(UINT64_C(0x4530000000000000), dl, SrcVT); 6443 SDValue LoMask = DAG.getConstant(UINT64_C(0x00000000FFFFFFFF), dl, SrcVT); 6444 SDValue HiShift = DAG.getConstant(32, dl, ShiftVT); 6445 6446 SDValue Lo = DAG.getNode(ISD::AND, dl, SrcVT, Src, LoMask); 6447 SDValue Hi = DAG.getNode(ISD::SRL, dl, SrcVT, Src, HiShift); 6448 SDValue LoOr = DAG.getNode(ISD::OR, dl, SrcVT, Lo, TwoP52); 6449 SDValue HiOr = DAG.getNode(ISD::OR, dl, SrcVT, Hi, TwoP84); 6450 SDValue LoFlt = DAG.getBitcast(DstVT, LoOr); 6451 SDValue HiFlt = DAG.getBitcast(DstVT, HiOr); 6452 if (Node->isStrictFPOpcode()) { 6453 SDValue HiSub = 6454 DAG.getNode(ISD::STRICT_FSUB, dl, {DstVT, MVT::Other}, 6455 {Node->getOperand(0), HiFlt, TwoP84PlusTwoP52}); 6456 Result = DAG.getNode(ISD::STRICT_FADD, dl, {DstVT, MVT::Other}, 6457 {HiSub.getValue(1), LoFlt, HiSub}); 6458 Chain = Result.getValue(1); 6459 } else { 6460 SDValue HiSub = 6461 DAG.getNode(ISD::FSUB, dl, DstVT, HiFlt, TwoP84PlusTwoP52); 6462 Result = DAG.getNode(ISD::FADD, dl, DstVT, LoFlt, HiSub); 6463 } 6464 return true; 6465 } 6466 6467 SDValue TargetLowering::expandFMINNUM_FMAXNUM(SDNode *Node, 6468 SelectionDAG &DAG) const { 6469 SDLoc dl(Node); 6470 unsigned NewOp = Node->getOpcode() == ISD::FMINNUM ? 6471 ISD::FMINNUM_IEEE : ISD::FMAXNUM_IEEE; 6472 EVT VT = Node->getValueType(0); 6473 if (isOperationLegalOrCustom(NewOp, VT)) { 6474 SDValue Quiet0 = Node->getOperand(0); 6475 SDValue Quiet1 = Node->getOperand(1); 6476 6477 if (!Node->getFlags().hasNoNaNs()) { 6478 // Insert canonicalizes if it's possible we need to quiet to get correct 6479 // sNaN behavior. 6480 if (!DAG.isKnownNeverSNaN(Quiet0)) { 6481 Quiet0 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet0, 6482 Node->getFlags()); 6483 } 6484 if (!DAG.isKnownNeverSNaN(Quiet1)) { 6485 Quiet1 = DAG.getNode(ISD::FCANONICALIZE, dl, VT, Quiet1, 6486 Node->getFlags()); 6487 } 6488 } 6489 6490 return DAG.getNode(NewOp, dl, VT, Quiet0, Quiet1, Node->getFlags()); 6491 } 6492 6493 // If the target has FMINIMUM/FMAXIMUM but not FMINNUM/FMAXNUM use that 6494 // instead if there are no NaNs. 6495 if (Node->getFlags().hasNoNaNs()) { 6496 unsigned IEEE2018Op = 6497 Node->getOpcode() == ISD::FMINNUM ? ISD::FMINIMUM : ISD::FMAXIMUM; 6498 if (isOperationLegalOrCustom(IEEE2018Op, VT)) { 6499 return DAG.getNode(IEEE2018Op, dl, VT, Node->getOperand(0), 6500 Node->getOperand(1), Node->getFlags()); 6501 } 6502 } 6503 6504 // If none of the above worked, but there are no NaNs, then expand to 6505 // a compare/select sequence. 
This is required for correctness since 6506 // InstCombine might have canonicalized a fcmp+select sequence to a 6507 // FMINNUM/FMAXNUM node. If we were to fall through to the default 6508 // expansion to libcall, we might introduce a link-time dependency 6509 // on libm into a file that originally did not have one. 6510 if (Node->getFlags().hasNoNaNs()) { 6511 ISD::CondCode Pred = 6512 Node->getOpcode() == ISD::FMINNUM ? ISD::SETLT : ISD::SETGT; 6513 SDValue Op1 = Node->getOperand(0); 6514 SDValue Op2 = Node->getOperand(1); 6515 SDValue SelCC = DAG.getSelectCC(dl, Op1, Op2, Op1, Op2, Pred); 6516 // Copy FMF flags, but always set the no-signed-zeros flag 6517 // as this is implied by the FMINNUM/FMAXNUM semantics. 6518 SDNodeFlags Flags = Node->getFlags(); 6519 Flags.setNoSignedZeros(true); 6520 SelCC->setFlags(Flags); 6521 return SelCC; 6522 } 6523 6524 return SDValue(); 6525 } 6526 6527 bool TargetLowering::expandCTPOP(SDNode *Node, SDValue &Result, 6528 SelectionDAG &DAG) const { 6529 SDLoc dl(Node); 6530 EVT VT = Node->getValueType(0); 6531 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6532 SDValue Op = Node->getOperand(0); 6533 unsigned Len = VT.getScalarSizeInBits(); 6534 assert(VT.isInteger() && "CTPOP not implemented for this type."); 6535 6536 // TODO: Add support for irregular type lengths. 6537 if (!(Len <= 128 && Len % 8 == 0)) 6538 return false; 6539 6540 // Only expand vector types if we have the appropriate vector bit operations. 6541 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::ADD, VT) || 6542 !isOperationLegalOrCustom(ISD::SUB, VT) || 6543 !isOperationLegalOrCustom(ISD::SRL, VT) || 6544 (Len != 8 && !isOperationLegalOrCustom(ISD::MUL, VT)) || 6545 !isOperationLegalOrCustomOrPromote(ISD::AND, VT))) 6546 return false; 6547 6548 // This is the "best" algorithm from 6549 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel 6550 SDValue Mask55 = 6551 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT); 6552 SDValue Mask33 = 6553 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT); 6554 SDValue Mask0F = 6555 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT); 6556 SDValue Mask01 = 6557 DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT); 6558 6559 // v = v - ((v >> 1) & 0x55555555...) 6560 Op = DAG.getNode(ISD::SUB, dl, VT, Op, 6561 DAG.getNode(ISD::AND, dl, VT, 6562 DAG.getNode(ISD::SRL, dl, VT, Op, 6563 DAG.getConstant(1, dl, ShVT)), 6564 Mask55)); 6565 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...) 6566 Op = DAG.getNode(ISD::ADD, dl, VT, DAG.getNode(ISD::AND, dl, VT, Op, Mask33), 6567 DAG.getNode(ISD::AND, dl, VT, 6568 DAG.getNode(ISD::SRL, dl, VT, Op, 6569 DAG.getConstant(2, dl, ShVT)), 6570 Mask33)); 6571 // v = (v + (v >> 4)) & 0x0F0F0F0F... 6572 Op = DAG.getNode(ISD::AND, dl, VT, 6573 DAG.getNode(ISD::ADD, dl, VT, Op, 6574 DAG.getNode(ISD::SRL, dl, VT, Op, 6575 DAG.getConstant(4, dl, ShVT))), 6576 Mask0F); 6577 // v = (v * 0x01010101...) 
>> (Len - 8) 6578 if (Len > 8) 6579 Op = 6580 DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::MUL, dl, VT, Op, Mask01), 6581 DAG.getConstant(Len - 8, dl, ShVT)); 6582 6583 Result = Op; 6584 return true; 6585 } 6586 6587 bool TargetLowering::expandCTLZ(SDNode *Node, SDValue &Result, 6588 SelectionDAG &DAG) const { 6589 SDLoc dl(Node); 6590 EVT VT = Node->getValueType(0); 6591 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6592 SDValue Op = Node->getOperand(0); 6593 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6594 6595 // If the non-ZERO_UNDEF version is supported we can use that instead. 6596 if (Node->getOpcode() == ISD::CTLZ_ZERO_UNDEF && 6597 isOperationLegalOrCustom(ISD::CTLZ, VT)) { 6598 Result = DAG.getNode(ISD::CTLZ, dl, VT, Op); 6599 return true; 6600 } 6601 6602 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6603 if (isOperationLegalOrCustom(ISD::CTLZ_ZERO_UNDEF, VT)) { 6604 EVT SetCCVT = 6605 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6606 SDValue CTLZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, dl, VT, Op); 6607 SDValue Zero = DAG.getConstant(0, dl, VT); 6608 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6609 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6610 DAG.getConstant(NumBitsPerElt, dl, VT), CTLZ); 6611 return true; 6612 } 6613 6614 // Only expand vector types if we have the appropriate vector bit operations. 6615 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6616 !isOperationLegalOrCustom(ISD::CTPOP, VT) || 6617 !isOperationLegalOrCustom(ISD::SRL, VT) || 6618 !isOperationLegalOrCustomOrPromote(ISD::OR, VT))) 6619 return false; 6620 6621 // for now, we do this: 6622 // x = x | (x >> 1); 6623 // x = x | (x >> 2); 6624 // ... 6625 // x = x | (x >>16); 6626 // x = x | (x >>32); // for 64-bit input 6627 // return popcount(~x); 6628 // 6629 // Ref: "Hacker's Delight" by Henry Warren 6630 for (unsigned i = 0; (1U << i) <= (NumBitsPerElt / 2); ++i) { 6631 SDValue Tmp = DAG.getConstant(1ULL << i, dl, ShVT); 6632 Op = DAG.getNode(ISD::OR, dl, VT, Op, 6633 DAG.getNode(ISD::SRL, dl, VT, Op, Tmp)); 6634 } 6635 Op = DAG.getNOT(dl, Op, VT); 6636 Result = DAG.getNode(ISD::CTPOP, dl, VT, Op); 6637 return true; 6638 } 6639 6640 bool TargetLowering::expandCTTZ(SDNode *Node, SDValue &Result, 6641 SelectionDAG &DAG) const { 6642 SDLoc dl(Node); 6643 EVT VT = Node->getValueType(0); 6644 SDValue Op = Node->getOperand(0); 6645 unsigned NumBitsPerElt = VT.getScalarSizeInBits(); 6646 6647 // If the non-ZERO_UNDEF version is supported we can use that instead. 6648 if (Node->getOpcode() == ISD::CTTZ_ZERO_UNDEF && 6649 isOperationLegalOrCustom(ISD::CTTZ, VT)) { 6650 Result = DAG.getNode(ISD::CTTZ, dl, VT, Op); 6651 return true; 6652 } 6653 6654 // If the ZERO_UNDEF version is supported use that and handle the zero case. 6655 if (isOperationLegalOrCustom(ISD::CTTZ_ZERO_UNDEF, VT)) { 6656 EVT SetCCVT = 6657 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 6658 SDValue CTTZ = DAG.getNode(ISD::CTTZ_ZERO_UNDEF, dl, VT, Op); 6659 SDValue Zero = DAG.getConstant(0, dl, VT); 6660 SDValue SrcIsZero = DAG.getSetCC(dl, SetCCVT, Op, Zero, ISD::SETEQ); 6661 Result = DAG.getNode(ISD::SELECT, dl, VT, SrcIsZero, 6662 DAG.getConstant(NumBitsPerElt, dl, VT), CTTZ); 6663 return true; 6664 } 6665 6666 // Only expand vector types if we have the appropriate vector bit operations. 
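  // Once past the legality check, the expansion below computes cttz(x) as
  // popcount(~x & (x - 1)); e.g. for x == 0b01011000,
  // ~x & (x - 1) == 0b00000111, whose popcount is 3, the number of trailing
  // zeros.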
6667 if (VT.isVector() && (!isPowerOf2_32(NumBitsPerElt) || 6668 (!isOperationLegalOrCustom(ISD::CTPOP, VT) && 6669 !isOperationLegalOrCustom(ISD::CTLZ, VT)) || 6670 !isOperationLegalOrCustom(ISD::SUB, VT) || 6671 !isOperationLegalOrCustomOrPromote(ISD::AND, VT) || 6672 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6673 return false; 6674 6675 // for now, we use: { return popcount(~x & (x - 1)); } 6676 // unless the target has ctlz but not ctpop, in which case we use: 6677 // { return 32 - nlz(~x & (x-1)); } 6678 // Ref: "Hacker's Delight" by Henry Warren 6679 SDValue Tmp = DAG.getNode( 6680 ISD::AND, dl, VT, DAG.getNOT(dl, Op, VT), 6681 DAG.getNode(ISD::SUB, dl, VT, Op, DAG.getConstant(1, dl, VT))); 6682 6683 // If ISD::CTLZ is legal and CTPOP isn't, then do that instead. 6684 if (isOperationLegal(ISD::CTLZ, VT) && !isOperationLegal(ISD::CTPOP, VT)) { 6685 Result = 6686 DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(NumBitsPerElt, dl, VT), 6687 DAG.getNode(ISD::CTLZ, dl, VT, Tmp)); 6688 return true; 6689 } 6690 6691 Result = DAG.getNode(ISD::CTPOP, dl, VT, Tmp); 6692 return true; 6693 } 6694 6695 bool TargetLowering::expandABS(SDNode *N, SDValue &Result, 6696 SelectionDAG &DAG) const { 6697 SDLoc dl(N); 6698 EVT VT = N->getValueType(0); 6699 EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout()); 6700 SDValue Op = N->getOperand(0); 6701 6702 // Only expand vector types if we have the appropriate vector operations. 6703 if (VT.isVector() && (!isOperationLegalOrCustom(ISD::SRA, VT) || 6704 !isOperationLegalOrCustom(ISD::ADD, VT) || 6705 !isOperationLegalOrCustomOrPromote(ISD::XOR, VT))) 6706 return false; 6707 6708 SDValue Shift = 6709 DAG.getNode(ISD::SRA, dl, VT, Op, 6710 DAG.getConstant(VT.getScalarSizeInBits() - 1, dl, ShVT)); 6711 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, Op, Shift); 6712 Result = DAG.getNode(ISD::XOR, dl, VT, Add, Shift); 6713 return true; 6714 } 6715 6716 std::pair<SDValue, SDValue> 6717 TargetLowering::scalarizeVectorLoad(LoadSDNode *LD, 6718 SelectionDAG &DAG) const { 6719 SDLoc SL(LD); 6720 SDValue Chain = LD->getChain(); 6721 SDValue BasePTR = LD->getBasePtr(); 6722 EVT SrcVT = LD->getMemoryVT(); 6723 EVT DstVT = LD->getValueType(0); 6724 ISD::LoadExtType ExtType = LD->getExtensionType(); 6725 6726 unsigned NumElem = SrcVT.getVectorNumElements(); 6727 6728 EVT SrcEltVT = SrcVT.getScalarType(); 6729 EVT DstEltVT = DstVT.getScalarType(); 6730 6731 // A vector must always be stored in memory as-is, i.e. without any padding 6732 // between the elements, since various code depend on it, e.g. in the 6733 // handling of a bitcast of a vector type to int, which may be done with a 6734 // vector store followed by an integer load. A vector that does not have 6735 // elements that are byte-sized must therefore be stored as an integer 6736 // built out of the extracted vector elements. 6737 if (!SrcEltVT.isByteSized()) { 6738 unsigned NumLoadBits = SrcVT.getStoreSizeInBits(); 6739 EVT LoadVT = EVT::getIntegerVT(*DAG.getContext(), NumLoadBits); 6740 6741 unsigned NumSrcBits = SrcVT.getSizeInBits(); 6742 EVT SrcIntVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcBits); 6743 6744 unsigned SrcEltBits = SrcEltVT.getSizeInBits(); 6745 SDValue SrcEltBitMask = DAG.getConstant( 6746 APInt::getLowBitsSet(NumLoadBits, SrcEltBits), SL, LoadVT); 6747 6748 // Load the whole vector and avoid masking off the top bits as it makes 6749 // the codegen worse. 
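  // Illustrative example: a <4 x i2> load is performed as one integer load of
  // the packed bits (here an i8); element Idx is then recovered as
  //   trunc<i2>((Load >> (Idx * 2)) & 0b11)
  // with the shift index mirrored on big-endian targets.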
6750 SDValue Load = 6751 DAG.getExtLoad(ISD::EXTLOAD, SL, LoadVT, Chain, BasePTR, 6752 LD->getPointerInfo(), SrcIntVT, LD->getAlignment(), 6753 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6754 6755 SmallVector<SDValue, 8> Vals; 6756 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6757 unsigned ShiftIntoIdx = 6758 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 6759 SDValue ShiftAmount = 6760 DAG.getShiftAmountConstant(ShiftIntoIdx * SrcEltVT.getSizeInBits(), 6761 LoadVT, SL, /*LegalTypes=*/false); 6762 SDValue ShiftedElt = DAG.getNode(ISD::SRL, SL, LoadVT, Load, ShiftAmount); 6763 SDValue Elt = 6764 DAG.getNode(ISD::AND, SL, LoadVT, ShiftedElt, SrcEltBitMask); 6765 SDValue Scalar = DAG.getNode(ISD::TRUNCATE, SL, SrcEltVT, Elt); 6766 6767 if (ExtType != ISD::NON_EXTLOAD) { 6768 unsigned ExtendOp = ISD::getExtForLoadExtType(false, ExtType); 6769 Scalar = DAG.getNode(ExtendOp, SL, DstEltVT, Scalar); 6770 } 6771 6772 Vals.push_back(Scalar); 6773 } 6774 6775 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 6776 return std::make_pair(Value, Load.getValue(1)); 6777 } 6778 6779 unsigned Stride = SrcEltVT.getSizeInBits() / 8; 6780 assert(SrcEltVT.isByteSized()); 6781 6782 SmallVector<SDValue, 8> Vals; 6783 SmallVector<SDValue, 8> LoadChains; 6784 6785 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6786 SDValue ScalarLoad = 6787 DAG.getExtLoad(ExtType, SL, DstEltVT, Chain, BasePTR, 6788 LD->getPointerInfo().getWithOffset(Idx * Stride), 6789 SrcEltVT, MinAlign(LD->getAlignment(), Idx * Stride), 6790 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6791 6792 BasePTR = DAG.getObjectPtrOffset(SL, BasePTR, Stride); 6793 6794 Vals.push_back(ScalarLoad.getValue(0)); 6795 LoadChains.push_back(ScalarLoad.getValue(1)); 6796 } 6797 6798 SDValue NewChain = DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoadChains); 6799 SDValue Value = DAG.getBuildVector(DstVT, SL, Vals); 6800 6801 return std::make_pair(Value, NewChain); 6802 } 6803 6804 SDValue TargetLowering::scalarizeVectorStore(StoreSDNode *ST, 6805 SelectionDAG &DAG) const { 6806 SDLoc SL(ST); 6807 6808 SDValue Chain = ST->getChain(); 6809 SDValue BasePtr = ST->getBasePtr(); 6810 SDValue Value = ST->getValue(); 6811 EVT StVT = ST->getMemoryVT(); 6812 6813 // The type of the data we want to save 6814 EVT RegVT = Value.getValueType(); 6815 EVT RegSclVT = RegVT.getScalarType(); 6816 6817 // The type of data as saved in memory. 6818 EVT MemSclVT = StVT.getScalarType(); 6819 6820 unsigned NumElem = StVT.getVectorNumElements(); 6821 6822 // A vector must always be stored in memory as-is, i.e. without any padding 6823 // between the elements, since various code depend on it, e.g. in the 6824 // handling of a bitcast of a vector type to int, which may be done with a 6825 // vector store followed by an integer load. A vector that does not have 6826 // elements that are byte-sized must therefore be stored as an integer 6827 // built out of the extracted vector elements. 
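  // Illustrative example: storing a <4 x i2> value packs element Idx into bit
  // position Idx * 2 of an i8 (mirrored on big-endian) by zero-extending,
  // shifting and OR'ing, and the packed integer is then written with a single
  // i8 store.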
6828 if (!MemSclVT.isByteSized()) { 6829 unsigned NumBits = StVT.getSizeInBits(); 6830 EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumBits); 6831 6832 SDValue CurrVal = DAG.getConstant(0, SL, IntVT); 6833 6834 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6835 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 6836 DAG.getVectorIdxConstant(Idx, SL)); 6837 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MemSclVT, Elt); 6838 SDValue ExtElt = DAG.getNode(ISD::ZERO_EXTEND, SL, IntVT, Trunc); 6839 unsigned ShiftIntoIdx = 6840 (DAG.getDataLayout().isBigEndian() ? (NumElem - 1) - Idx : Idx); 6841 SDValue ShiftAmount = 6842 DAG.getConstant(ShiftIntoIdx * MemSclVT.getSizeInBits(), SL, IntVT); 6843 SDValue ShiftedElt = 6844 DAG.getNode(ISD::SHL, SL, IntVT, ExtElt, ShiftAmount); 6845 CurrVal = DAG.getNode(ISD::OR, SL, IntVT, CurrVal, ShiftedElt); 6846 } 6847 6848 return DAG.getStore(Chain, SL, CurrVal, BasePtr, ST->getPointerInfo(), 6849 ST->getAlignment(), ST->getMemOperand()->getFlags(), 6850 ST->getAAInfo()); 6851 } 6852 6853 // Store Stride in bytes 6854 unsigned Stride = MemSclVT.getSizeInBits() / 8; 6855 assert(Stride && "Zero stride!"); 6856 // Extract each of the elements from the original vector and save them into 6857 // memory individually. 6858 SmallVector<SDValue, 8> Stores; 6859 for (unsigned Idx = 0; Idx < NumElem; ++Idx) { 6860 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, RegSclVT, Value, 6861 DAG.getVectorIdxConstant(Idx, SL)); 6862 6863 SDValue Ptr = DAG.getObjectPtrOffset(SL, BasePtr, Idx * Stride); 6864 6865 // This scalar TruncStore may be illegal, but we legalize it later. 6866 SDValue Store = DAG.getTruncStore( 6867 Chain, SL, Elt, Ptr, ST->getPointerInfo().getWithOffset(Idx * Stride), 6868 MemSclVT, MinAlign(ST->getAlignment(), Idx * Stride), 6869 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 6870 6871 Stores.push_back(Store); 6872 } 6873 6874 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Stores); 6875 } 6876 6877 std::pair<SDValue, SDValue> 6878 TargetLowering::expandUnalignedLoad(LoadSDNode *LD, SelectionDAG &DAG) const { 6879 assert(LD->getAddressingMode() == ISD::UNINDEXED && 6880 "unaligned indexed loads not implemented!"); 6881 SDValue Chain = LD->getChain(); 6882 SDValue Ptr = LD->getBasePtr(); 6883 EVT VT = LD->getValueType(0); 6884 EVT LoadedVT = LD->getMemoryVT(); 6885 SDLoc dl(LD); 6886 auto &MF = DAG.getMachineFunction(); 6887 6888 if (VT.isFloatingPoint() || VT.isVector()) { 6889 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), LoadedVT.getSizeInBits()); 6890 if (isTypeLegal(intVT) && isTypeLegal(LoadedVT)) { 6891 if (!isOperationLegalOrCustom(ISD::LOAD, intVT) && 6892 LoadedVT.isVector()) { 6893 // Scalarize the load and let the individual components be handled. 6894 return scalarizeVectorLoad(LD, DAG); 6895 } 6896 6897 // Expand to a (misaligned) integer load of the same size, 6898 // then bitconvert to floating point or vector. 6899 SDValue newLoad = DAG.getLoad(intVT, dl, Chain, Ptr, 6900 LD->getMemOperand()); 6901 SDValue Result = DAG.getNode(ISD::BITCAST, dl, LoadedVT, newLoad); 6902 if (LoadedVT != VT) 6903 Result = DAG.getNode(VT.isFloatingPoint() ? ISD::FP_EXTEND : 6904 ISD::ANY_EXTEND, dl, VT, Result); 6905 6906 return std::make_pair(Result, newLoad.getValue(1)); 6907 } 6908 6909 // Copy the value to a (aligned) stack slot using (unaligned) integer 6910 // loads and stores, then do a (aligned) load from the stack slot. 
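  // For example (illustrative): an unaligned 12-byte load with 4-byte integer
  // registers becomes two full-register load/store pairs into the slot plus
  // one final copy of the remaining bytes (an extending load and a truncating
  // store, so a partial tail also works), followed by re-issuing the original
  // load from the aligned stack slot.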
6911 MVT RegVT = getRegisterType(*DAG.getContext(), intVT); 6912 unsigned LoadedBytes = LoadedVT.getStoreSize(); 6913 unsigned RegBytes = RegVT.getSizeInBits() / 8; 6914 unsigned NumRegs = (LoadedBytes + RegBytes - 1) / RegBytes; 6915 6916 // Make sure the stack slot is also aligned for the register type. 6917 SDValue StackBase = DAG.CreateStackTemporary(LoadedVT, RegVT); 6918 auto FrameIndex = cast<FrameIndexSDNode>(StackBase.getNode())->getIndex(); 6919 SmallVector<SDValue, 8> Stores; 6920 SDValue StackPtr = StackBase; 6921 unsigned Offset = 0; 6922 6923 EVT PtrVT = Ptr.getValueType(); 6924 EVT StackPtrVT = StackPtr.getValueType(); 6925 6926 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 6927 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 6928 6929 // Do all but one copies using the full register width. 6930 for (unsigned i = 1; i < NumRegs; i++) { 6931 // Load one integer register's worth from the original location. 6932 SDValue Load = DAG.getLoad( 6933 RegVT, dl, Chain, Ptr, LD->getPointerInfo().getWithOffset(Offset), 6934 MinAlign(LD->getAlignment(), Offset), LD->getMemOperand()->getFlags(), 6935 LD->getAAInfo()); 6936 // Follow the load with a store to the stack slot. Remember the store. 6937 Stores.push_back(DAG.getStore( 6938 Load.getValue(1), dl, Load, StackPtr, 6939 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset))); 6940 // Increment the pointers. 6941 Offset += RegBytes; 6942 6943 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 6944 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 6945 } 6946 6947 // The last copy may be partial. Do an extending load. 6948 EVT MemVT = EVT::getIntegerVT(*DAG.getContext(), 6949 8 * (LoadedBytes - Offset)); 6950 SDValue Load = 6951 DAG.getExtLoad(ISD::EXTLOAD, dl, RegVT, Chain, Ptr, 6952 LD->getPointerInfo().getWithOffset(Offset), MemVT, 6953 MinAlign(LD->getAlignment(), Offset), 6954 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 6955 // Follow the load with a store to the stack slot. Remember the store. 6956 // On big-endian machines this requires a truncating store to ensure 6957 // that the bits end up in the right place. 6958 Stores.push_back(DAG.getTruncStore( 6959 Load.getValue(1), dl, Load, StackPtr, 6960 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), MemVT)); 6961 6962 // The order of the stores doesn't matter - say it with a TokenFactor. 6963 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 6964 6965 // Finally, perform the original load only redirected to the stack slot. 6966 Load = DAG.getExtLoad(LD->getExtensionType(), dl, VT, TF, StackBase, 6967 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), 6968 LoadedVT); 6969 6970 // Callers expect a MERGE_VALUES node. 6971 return std::make_pair(Load, TF); 6972 } 6973 6974 assert(LoadedVT.isInteger() && !LoadedVT.isVector() && 6975 "Unaligned load of unsupported type."); 6976 6977 // Compute the new VT that is half the size of the old one. This is an 6978 // integer MVT. 6979 unsigned NumBits = LoadedVT.getSizeInBits(); 6980 EVT NewLoadedVT; 6981 NewLoadedVT = EVT::getIntegerVT(*DAG.getContext(), NumBits/2); 6982 NumBits >>= 1; 6983 6984 unsigned Alignment = LD->getAlignment(); 6985 unsigned IncrementSize = NumBits / 8; 6986 ISD::LoadExtType HiExtType = LD->getExtensionType(); 6987 6988 // If the original load is NON_EXTLOAD, the hi part load must be ZEXTLOAD. 
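  // For example (little-endian, illustrative): an unaligned i32 load becomes
  // an i16 ZEXTLOAD of the low half at offset 0 and an i16 load of the high
  // half at offset 2, recombined below as (Hi << 16) | Lo; on big-endian
  // targets the two halves come from the opposite offsets.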
6989 if (HiExtType == ISD::NON_EXTLOAD) 6990 HiExtType = ISD::ZEXTLOAD; 6991 6992 // Load the value in two parts 6993 SDValue Lo, Hi; 6994 if (DAG.getDataLayout().isLittleEndian()) { 6995 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, LD->getPointerInfo(), 6996 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 6997 LD->getAAInfo()); 6998 6999 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 7000 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, 7001 LD->getPointerInfo().getWithOffset(IncrementSize), 7002 NewLoadedVT, MinAlign(Alignment, IncrementSize), 7003 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 7004 } else { 7005 Hi = DAG.getExtLoad(HiExtType, dl, VT, Chain, Ptr, LD->getPointerInfo(), 7006 NewLoadedVT, Alignment, LD->getMemOperand()->getFlags(), 7007 LD->getAAInfo()); 7008 7009 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 7010 Lo = DAG.getExtLoad(ISD::ZEXTLOAD, dl, VT, Chain, Ptr, 7011 LD->getPointerInfo().getWithOffset(IncrementSize), 7012 NewLoadedVT, MinAlign(Alignment, IncrementSize), 7013 LD->getMemOperand()->getFlags(), LD->getAAInfo()); 7014 } 7015 7016 // aggregate the two parts 7017 SDValue ShiftAmount = 7018 DAG.getConstant(NumBits, dl, getShiftAmountTy(Hi.getValueType(), 7019 DAG.getDataLayout())); 7020 SDValue Result = DAG.getNode(ISD::SHL, dl, VT, Hi, ShiftAmount); 7021 Result = DAG.getNode(ISD::OR, dl, VT, Result, Lo); 7022 7023 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Lo.getValue(1), 7024 Hi.getValue(1)); 7025 7026 return std::make_pair(Result, TF); 7027 } 7028 7029 SDValue TargetLowering::expandUnalignedStore(StoreSDNode *ST, 7030 SelectionDAG &DAG) const { 7031 assert(ST->getAddressingMode() == ISD::UNINDEXED && 7032 "unaligned indexed stores not implemented!"); 7033 SDValue Chain = ST->getChain(); 7034 SDValue Ptr = ST->getBasePtr(); 7035 SDValue Val = ST->getValue(); 7036 EVT VT = Val.getValueType(); 7037 int Alignment = ST->getAlignment(); 7038 auto &MF = DAG.getMachineFunction(); 7039 EVT StoreMemVT = ST->getMemoryVT(); 7040 7041 SDLoc dl(ST); 7042 if (StoreMemVT.isFloatingPoint() || StoreMemVT.isVector()) { 7043 EVT intVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits()); 7044 if (isTypeLegal(intVT)) { 7045 if (!isOperationLegalOrCustom(ISD::STORE, intVT) && 7046 StoreMemVT.isVector()) { 7047 // Scalarize the store and let the individual components be handled. 7048 SDValue Result = scalarizeVectorStore(ST, DAG); 7049 return Result; 7050 } 7051 // Expand to a bitconvert of the value to the integer type of the 7052 // same size, then a (misaligned) int store. 7053 // FIXME: Does not handle truncating floating point stores! 7054 SDValue Result = DAG.getNode(ISD::BITCAST, dl, intVT, Val); 7055 Result = DAG.getStore(Chain, dl, Result, Ptr, ST->getPointerInfo(), 7056 Alignment, ST->getMemOperand()->getFlags()); 7057 return Result; 7058 } 7059 // Do a (aligned) store to a stack slot, then copy from the stack slot 7060 // to the final destination using (unaligned) integer loads and stores. 7061 MVT RegVT = getRegisterType( 7062 *DAG.getContext(), 7063 EVT::getIntegerVT(*DAG.getContext(), StoreMemVT.getSizeInBits())); 7064 EVT PtrVT = Ptr.getValueType(); 7065 unsigned StoredBytes = StoreMemVT.getStoreSize(); 7066 unsigned RegBytes = RegVT.getSizeInBits() / 8; 7067 unsigned NumRegs = (StoredBytes + RegBytes - 1) / RegBytes; 7068 7069 // Make sure the stack slot is also aligned for the register type. 
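  // This mirrors expandUnalignedLoad above: the value is first spilled to the
  // aligned slot with a single truncating store, then copied to the real
  // destination one register at a time, with an extending load and truncating
  // store handling any partial tail.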
7070 SDValue StackPtr = DAG.CreateStackTemporary(StoreMemVT, RegVT); 7071 auto FrameIndex = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); 7072 7073 // Perform the original store, only redirected to the stack slot. 7074 SDValue Store = DAG.getTruncStore( 7075 Chain, dl, Val, StackPtr, 7076 MachinePointerInfo::getFixedStack(MF, FrameIndex, 0), StoreMemVT); 7077 7078 EVT StackPtrVT = StackPtr.getValueType(); 7079 7080 SDValue PtrIncrement = DAG.getConstant(RegBytes, dl, PtrVT); 7081 SDValue StackPtrIncrement = DAG.getConstant(RegBytes, dl, StackPtrVT); 7082 SmallVector<SDValue, 8> Stores; 7083 unsigned Offset = 0; 7084 7085 // Do all but one copies using the full register width. 7086 for (unsigned i = 1; i < NumRegs; i++) { 7087 // Load one integer register's worth from the stack slot. 7088 SDValue Load = DAG.getLoad( 7089 RegVT, dl, Store, StackPtr, 7090 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset)); 7091 // Store it to the final location. Remember the store. 7092 Stores.push_back(DAG.getStore(Load.getValue(1), dl, Load, Ptr, 7093 ST->getPointerInfo().getWithOffset(Offset), 7094 MinAlign(ST->getAlignment(), Offset), 7095 ST->getMemOperand()->getFlags())); 7096 // Increment the pointers. 7097 Offset += RegBytes; 7098 StackPtr = DAG.getObjectPtrOffset(dl, StackPtr, StackPtrIncrement); 7099 Ptr = DAG.getObjectPtrOffset(dl, Ptr, PtrIncrement); 7100 } 7101 7102 // The last store may be partial. Do a truncating store. On big-endian 7103 // machines this requires an extending load from the stack slot to ensure 7104 // that the bits are in the right place. 7105 EVT LoadMemVT = 7106 EVT::getIntegerVT(*DAG.getContext(), 8 * (StoredBytes - Offset)); 7107 7108 // Load from the stack slot. 7109 SDValue Load = DAG.getExtLoad( 7110 ISD::EXTLOAD, dl, RegVT, Store, StackPtr, 7111 MachinePointerInfo::getFixedStack(MF, FrameIndex, Offset), LoadMemVT); 7112 7113 Stores.push_back( 7114 DAG.getTruncStore(Load.getValue(1), dl, Load, Ptr, 7115 ST->getPointerInfo().getWithOffset(Offset), LoadMemVT, 7116 MinAlign(ST->getAlignment(), Offset), 7117 ST->getMemOperand()->getFlags(), ST->getAAInfo())); 7118 // The order of the stores doesn't matter - say it with a TokenFactor. 7119 SDValue Result = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores); 7120 return Result; 7121 } 7122 7123 assert(StoreMemVT.isInteger() && !StoreMemVT.isVector() && 7124 "Unaligned store of unknown type."); 7125 // Get the half-size VT 7126 EVT NewStoredVT = StoreMemVT.getHalfSizedIntegerVT(*DAG.getContext()); 7127 int NumBits = NewStoredVT.getSizeInBits(); 7128 int IncrementSize = NumBits / 8; 7129 7130 // Divide the stored value in two parts. 7131 SDValue ShiftAmount = DAG.getConstant( 7132 NumBits, dl, getShiftAmountTy(Val.getValueType(), DAG.getDataLayout())); 7133 SDValue Lo = Val; 7134 SDValue Hi = DAG.getNode(ISD::SRL, dl, VT, Val, ShiftAmount); 7135 7136 // Store the two parts 7137 SDValue Store1, Store2; 7138 Store1 = DAG.getTruncStore(Chain, dl, 7139 DAG.getDataLayout().isLittleEndian() ? Lo : Hi, 7140 Ptr, ST->getPointerInfo(), NewStoredVT, Alignment, 7141 ST->getMemOperand()->getFlags()); 7142 7143 Ptr = DAG.getObjectPtrOffset(dl, Ptr, IncrementSize); 7144 Alignment = MinAlign(Alignment, IncrementSize); 7145 Store2 = DAG.getTruncStore( 7146 Chain, dl, DAG.getDataLayout().isLittleEndian() ? 
Hi : Lo, Ptr, 7147 ST->getPointerInfo().getWithOffset(IncrementSize), NewStoredVT, Alignment, 7148 ST->getMemOperand()->getFlags(), ST->getAAInfo()); 7149 7150 SDValue Result = 7151 DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Store1, Store2); 7152 return Result; 7153 } 7154 7155 SDValue 7156 TargetLowering::IncrementMemoryAddress(SDValue Addr, SDValue Mask, 7157 const SDLoc &DL, EVT DataVT, 7158 SelectionDAG &DAG, 7159 bool IsCompressedMemory) const { 7160 SDValue Increment; 7161 EVT AddrVT = Addr.getValueType(); 7162 EVT MaskVT = Mask.getValueType(); 7163 assert(DataVT.getVectorNumElements() == MaskVT.getVectorNumElements() && 7164 "Incompatible types of Data and Mask"); 7165 if (IsCompressedMemory) { 7166 if (DataVT.isScalableVector()) 7167 report_fatal_error( 7168 "Cannot currently handle compressed memory with scalable vectors"); 7169 // Incrementing the pointer according to number of '1's in the mask. 7170 EVT MaskIntVT = EVT::getIntegerVT(*DAG.getContext(), MaskVT.getSizeInBits()); 7171 SDValue MaskInIntReg = DAG.getBitcast(MaskIntVT, Mask); 7172 if (MaskIntVT.getSizeInBits() < 32) { 7173 MaskInIntReg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, MaskInIntReg); 7174 MaskIntVT = MVT::i32; 7175 } 7176 7177 // Count '1's with POPCNT. 7178 Increment = DAG.getNode(ISD::CTPOP, DL, MaskIntVT, MaskInIntReg); 7179 Increment = DAG.getZExtOrTrunc(Increment, DL, AddrVT); 7180 // Scale is an element size in bytes. 7181 SDValue Scale = DAG.getConstant(DataVT.getScalarSizeInBits() / 8, DL, 7182 AddrVT); 7183 Increment = DAG.getNode(ISD::MUL, DL, AddrVT, Increment, Scale); 7184 } else if (DataVT.isScalableVector()) { 7185 Increment = DAG.getVScale(DL, AddrVT, 7186 APInt(AddrVT.getSizeInBits().getFixedSize(), 7187 DataVT.getStoreSize().getKnownMinSize())); 7188 } else 7189 Increment = DAG.getConstant(DataVT.getStoreSize(), DL, AddrVT); 7190 7191 return DAG.getNode(ISD::ADD, DL, AddrVT, Addr, Increment); 7192 } 7193 7194 static SDValue clampDynamicVectorIndex(SelectionDAG &DAG, 7195 SDValue Idx, 7196 EVT VecVT, 7197 const SDLoc &dl) { 7198 if (isa<ConstantSDNode>(Idx)) 7199 return Idx; 7200 7201 EVT IdxVT = Idx.getValueType(); 7202 unsigned NElts = VecVT.getVectorNumElements(); 7203 if (isPowerOf2_32(NElts)) { 7204 APInt Imm = APInt::getLowBitsSet(IdxVT.getSizeInBits(), 7205 Log2_32(NElts)); 7206 return DAG.getNode(ISD::AND, dl, IdxVT, Idx, 7207 DAG.getConstant(Imm, dl, IdxVT)); 7208 } 7209 7210 return DAG.getNode(ISD::UMIN, dl, IdxVT, Idx, 7211 DAG.getConstant(NElts - 1, dl, IdxVT)); 7212 } 7213 7214 SDValue TargetLowering::getVectorElementPointer(SelectionDAG &DAG, 7215 SDValue VecPtr, EVT VecVT, 7216 SDValue Index) const { 7217 SDLoc dl(Index); 7218 // Make sure the index type is big enough to compute in. 7219 Index = DAG.getZExtOrTrunc(Index, dl, VecPtr.getValueType()); 7220 7221 EVT EltVT = VecVT.getVectorElementType(); 7222 7223 // Calculate the element offset and add it to the pointer. 7224 unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size. 
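  // The index is first clamped to the vector bounds (clampDynamicVectorIndex
  // above uses an AND for power-of-two element counts and a UMIN otherwise)
  // and then scaled by the element size in bytes before being added to the
  // base pointer.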
7225 assert(EltSize * 8 == EltVT.getSizeInBits() &&
7226 "Converting bits to bytes lost precision");
7227
7228 Index = clampDynamicVectorIndex(DAG, Index, VecVT, dl);
7229
7230 EVT IdxVT = Index.getValueType();
7231
7232 Index = DAG.getNode(ISD::MUL, dl, IdxVT, Index,
7233 DAG.getConstant(EltSize, dl, IdxVT));
7234 return DAG.getMemBasePlusOffset(VecPtr, Index, dl);
7235 }
7236
7237 //===----------------------------------------------------------------------===//
7238 // Implementation of Emulated TLS Model
7239 //===----------------------------------------------------------------------===//
7240
7241 SDValue TargetLowering::LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
7242 SelectionDAG &DAG) const {
7243 // Access to the address of TLS variable xyz is lowered to a function call:
7244 // __emutls_get_address( address of global variable named "__emutls_v.xyz" )
7245 EVT PtrVT = getPointerTy(DAG.getDataLayout());
7246 PointerType *VoidPtrType = Type::getInt8PtrTy(*DAG.getContext());
7247 SDLoc dl(GA);
7248
7249 ArgListTy Args;
7250 ArgListEntry Entry;
7251 std::string NameString = ("__emutls_v." + GA->getGlobal()->getName()).str();
7252 Module *VariableModule = const_cast<Module*>(GA->getGlobal()->getParent());
7253 StringRef EmuTlsVarName(NameString);
7254 GlobalVariable *EmuTlsVar = VariableModule->getNamedGlobal(EmuTlsVarName);
7255 assert(EmuTlsVar && "Cannot find EmuTlsVar ");
7256 Entry.Node = DAG.getGlobalAddress(EmuTlsVar, dl, PtrVT);
7257 Entry.Ty = VoidPtrType;
7258 Args.push_back(Entry);
7259
7260 SDValue EmuTlsGetAddr = DAG.getExternalSymbol("__emutls_get_address", PtrVT);
7261
7262 TargetLowering::CallLoweringInfo CLI(DAG);
7263 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode());
7264 CLI.setLibCallee(CallingConv::C, VoidPtrType, EmuTlsGetAddr, std::move(Args));
7265 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7266
7267 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
7268 // At least for X86 targets; maybe good for other targets too?
7269 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7270 MFI.setAdjustsStack(true); // Is this only for the X86 target?
7271 MFI.setHasCalls(true); 7272 7273 assert((GA->getOffset() == 0) && 7274 "Emulated TLS must have zero offset in GlobalAddressSDNode"); 7275 return CallResult.first; 7276 } 7277 7278 SDValue TargetLowering::lowerCmpEqZeroToCtlzSrl(SDValue Op, 7279 SelectionDAG &DAG) const { 7280 assert((Op->getOpcode() == ISD::SETCC) && "Input has to be a SETCC node."); 7281 if (!isCtlzFast()) 7282 return SDValue(); 7283 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get(); 7284 SDLoc dl(Op); 7285 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) { 7286 if (C->isNullValue() && CC == ISD::SETEQ) { 7287 EVT VT = Op.getOperand(0).getValueType(); 7288 SDValue Zext = Op.getOperand(0); 7289 if (VT.bitsLT(MVT::i32)) { 7290 VT = MVT::i32; 7291 Zext = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Op.getOperand(0)); 7292 } 7293 unsigned Log2b = Log2_32(VT.getSizeInBits()); 7294 SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Zext); 7295 SDValue Scc = DAG.getNode(ISD::SRL, dl, VT, Clz, 7296 DAG.getConstant(Log2b, dl, MVT::i32)); 7297 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Scc); 7298 } 7299 } 7300 return SDValue(); 7301 } 7302 7303 SDValue TargetLowering::expandAddSubSat(SDNode *Node, SelectionDAG &DAG) const { 7304 unsigned Opcode = Node->getOpcode(); 7305 SDValue LHS = Node->getOperand(0); 7306 SDValue RHS = Node->getOperand(1); 7307 EVT VT = LHS.getValueType(); 7308 SDLoc dl(Node); 7309 7310 assert(VT == RHS.getValueType() && "Expected operands to be the same type"); 7311 assert(VT.isInteger() && "Expected operands to be integers"); 7312 7313 // usub.sat(a, b) -> umax(a, b) - b 7314 if (Opcode == ISD::USUBSAT && isOperationLegalOrCustom(ISD::UMAX, VT)) { 7315 SDValue Max = DAG.getNode(ISD::UMAX, dl, VT, LHS, RHS); 7316 return DAG.getNode(ISD::SUB, dl, VT, Max, RHS); 7317 } 7318 7319 if (Opcode == ISD::UADDSAT && isOperationLegalOrCustom(ISD::UMIN, VT)) { 7320 SDValue InvRHS = DAG.getNOT(dl, RHS, VT); 7321 SDValue Min = DAG.getNode(ISD::UMIN, dl, VT, LHS, InvRHS); 7322 return DAG.getNode(ISD::ADD, dl, VT, Min, RHS); 7323 } 7324 7325 unsigned OverflowOp; 7326 switch (Opcode) { 7327 case ISD::SADDSAT: 7328 OverflowOp = ISD::SADDO; 7329 break; 7330 case ISD::UADDSAT: 7331 OverflowOp = ISD::UADDO; 7332 break; 7333 case ISD::SSUBSAT: 7334 OverflowOp = ISD::SSUBO; 7335 break; 7336 case ISD::USUBSAT: 7337 OverflowOp = ISD::USUBO; 7338 break; 7339 default: 7340 llvm_unreachable("Expected method to receive signed or unsigned saturation " 7341 "addition or subtraction node."); 7342 } 7343 7344 // FIXME: Should really try to split the vector in case it's legal on a 7345 // subvector. 7346 if (VT.isVector() && !isOperationLegalOrCustom(ISD::VSELECT, VT)) 7347 return DAG.UnrollVectorOp(Node); 7348 7349 unsigned BitWidth = LHS.getScalarValueSizeInBits(); 7350 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7351 SDValue Result = DAG.getNode(OverflowOp, dl, DAG.getVTList(VT, BoolVT), 7352 LHS, RHS); 7353 SDValue SumDiff = Result.getValue(0); 7354 SDValue Overflow = Result.getValue(1); 7355 SDValue Zero = DAG.getConstant(0, dl, VT); 7356 SDValue AllOnes = DAG.getAllOnesConstant(dl, VT); 7357 7358 if (Opcode == ISD::UADDSAT) { 7359 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7360 // (LHS + RHS) | OverflowMask 7361 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7362 return DAG.getNode(ISD::OR, dl, VT, SumDiff, OverflowMask); 7363 } 7364 // Overflow ? 0xffff.... 
: (LHS + RHS) 7365 return DAG.getSelect(dl, VT, Overflow, AllOnes, SumDiff); 7366 } else if (Opcode == ISD::USUBSAT) { 7367 if (getBooleanContents(VT) == ZeroOrNegativeOneBooleanContent) { 7368 // (LHS - RHS) & ~OverflowMask 7369 SDValue OverflowMask = DAG.getSExtOrTrunc(Overflow, dl, VT); 7370 SDValue Not = DAG.getNOT(dl, OverflowMask, VT); 7371 return DAG.getNode(ISD::AND, dl, VT, SumDiff, Not); 7372 } 7373 // Overflow ? 0 : (LHS - RHS) 7374 return DAG.getSelect(dl, VT, Overflow, Zero, SumDiff); 7375 } else { 7376 // SatMax -> Overflow && SumDiff < 0 7377 // SatMin -> Overflow && SumDiff >= 0 7378 APInt MinVal = APInt::getSignedMinValue(BitWidth); 7379 APInt MaxVal = APInt::getSignedMaxValue(BitWidth); 7380 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7381 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7382 SDValue SumNeg = DAG.getSetCC(dl, BoolVT, SumDiff, Zero, ISD::SETLT); 7383 Result = DAG.getSelect(dl, VT, SumNeg, SatMax, SatMin); 7384 return DAG.getSelect(dl, VT, Overflow, Result, SumDiff); 7385 } 7386 } 7387 7388 SDValue 7389 TargetLowering::expandFixedPointMul(SDNode *Node, SelectionDAG &DAG) const { 7390 assert((Node->getOpcode() == ISD::SMULFIX || 7391 Node->getOpcode() == ISD::UMULFIX || 7392 Node->getOpcode() == ISD::SMULFIXSAT || 7393 Node->getOpcode() == ISD::UMULFIXSAT) && 7394 "Expected a fixed point multiplication opcode"); 7395 7396 SDLoc dl(Node); 7397 SDValue LHS = Node->getOperand(0); 7398 SDValue RHS = Node->getOperand(1); 7399 EVT VT = LHS.getValueType(); 7400 unsigned Scale = Node->getConstantOperandVal(2); 7401 bool Saturating = (Node->getOpcode() == ISD::SMULFIXSAT || 7402 Node->getOpcode() == ISD::UMULFIXSAT); 7403 bool Signed = (Node->getOpcode() == ISD::SMULFIX || 7404 Node->getOpcode() == ISD::SMULFIXSAT); 7405 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7406 unsigned VTSize = VT.getScalarSizeInBits(); 7407 7408 if (!Scale) { 7409 // [us]mul.fix(a, b, 0) -> mul(a, b) 7410 if (!Saturating) { 7411 if (isOperationLegalOrCustom(ISD::MUL, VT)) 7412 return DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7413 } else if (Signed && isOperationLegalOrCustom(ISD::SMULO, VT)) { 7414 SDValue Result = 7415 DAG.getNode(ISD::SMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 7416 SDValue Product = Result.getValue(0); 7417 SDValue Overflow = Result.getValue(1); 7418 SDValue Zero = DAG.getConstant(0, dl, VT); 7419 7420 APInt MinVal = APInt::getSignedMinValue(VTSize); 7421 APInt MaxVal = APInt::getSignedMaxValue(VTSize); 7422 SDValue SatMin = DAG.getConstant(MinVal, dl, VT); 7423 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7424 SDValue ProdNeg = DAG.getSetCC(dl, BoolVT, Product, Zero, ISD::SETLT); 7425 Result = DAG.getSelect(dl, VT, ProdNeg, SatMax, SatMin); 7426 return DAG.getSelect(dl, VT, Overflow, Result, Product); 7427 } else if (!Signed && isOperationLegalOrCustom(ISD::UMULO, VT)) { 7428 SDValue Result = 7429 DAG.getNode(ISD::UMULO, dl, DAG.getVTList(VT, BoolVT), LHS, RHS); 7430 SDValue Product = Result.getValue(0); 7431 SDValue Overflow = Result.getValue(1); 7432 7433 APInt MaxVal = APInt::getMaxValue(VTSize); 7434 SDValue SatMax = DAG.getConstant(MaxVal, dl, VT); 7435 return DAG.getSelect(dl, VT, Overflow, SatMax, Product); 7436 } 7437 } 7438 7439 assert(((Signed && Scale < VTSize) || (!Signed && Scale <= VTSize)) && 7440 "Expected scale to be less than the number of bits if signed or at " 7441 "most the number of bits if unsigned."); 7442 assert(LHS.getValueType() == RHS.getValueType() && 7443 "Expected both operands to be the 
same type"); 7444 7445 // Get the upper and lower bits of the result. 7446 SDValue Lo, Hi; 7447 unsigned LoHiOp = Signed ? ISD::SMUL_LOHI : ISD::UMUL_LOHI; 7448 unsigned HiOp = Signed ? ISD::MULHS : ISD::MULHU; 7449 if (isOperationLegalOrCustom(LoHiOp, VT)) { 7450 SDValue Result = DAG.getNode(LoHiOp, dl, DAG.getVTList(VT, VT), LHS, RHS); 7451 Lo = Result.getValue(0); 7452 Hi = Result.getValue(1); 7453 } else if (isOperationLegalOrCustom(HiOp, VT)) { 7454 Lo = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS); 7455 Hi = DAG.getNode(HiOp, dl, VT, LHS, RHS); 7456 } else if (VT.isVector()) { 7457 return SDValue(); 7458 } else { 7459 report_fatal_error("Unable to expand fixed point multiplication."); 7460 } 7461 7462 if (Scale == VTSize) 7463 // Result is just the top half since we'd be shifting by the width of the 7464 // operand. Overflow impossible so this works for both UMULFIX and 7465 // UMULFIXSAT. 7466 return Hi; 7467 7468 // The result will need to be shifted right by the scale since both operands 7469 // are scaled. The result is given to us in 2 halves, so we only want part of 7470 // both in the result. 7471 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7472 SDValue Result = DAG.getNode(ISD::FSHR, dl, VT, Hi, Lo, 7473 DAG.getConstant(Scale, dl, ShiftTy)); 7474 if (!Saturating) 7475 return Result; 7476 7477 if (!Signed) { 7478 // Unsigned overflow happened if the upper (VTSize - Scale) bits (of the 7479 // widened multiplication) aren't all zeroes. 7480 7481 // Saturate to max if ((Hi >> Scale) != 0), 7482 // which is the same as if (Hi > ((1 << Scale) - 1)) 7483 APInt MaxVal = APInt::getMaxValue(VTSize); 7484 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale), 7485 dl, VT); 7486 Result = DAG.getSelectCC(dl, Hi, LowMask, 7487 DAG.getConstant(MaxVal, dl, VT), Result, 7488 ISD::SETUGT); 7489 7490 return Result; 7491 } 7492 7493 // Signed overflow happened if the upper (VTSize - Scale + 1) bits (of the 7494 // widened multiplication) aren't all ones or all zeroes. 7495 7496 SDValue SatMin = DAG.getConstant(APInt::getSignedMinValue(VTSize), dl, VT); 7497 SDValue SatMax = DAG.getConstant(APInt::getSignedMaxValue(VTSize), dl, VT); 7498 7499 if (Scale == 0) { 7500 SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, Lo, 7501 DAG.getConstant(VTSize - 1, dl, ShiftTy)); 7502 SDValue Overflow = DAG.getSetCC(dl, BoolVT, Hi, Sign, ISD::SETNE); 7503 // Saturated to SatMin if wide product is negative, and SatMax if wide 7504 // product is positive ... 7505 SDValue Zero = DAG.getConstant(0, dl, VT); 7506 SDValue ResultIfOverflow = DAG.getSelectCC(dl, Hi, Zero, SatMin, SatMax, 7507 ISD::SETLT); 7508 // ... but only if we overflowed. 7509 return DAG.getSelect(dl, VT, Overflow, ResultIfOverflow, Result); 7510 } 7511 7512 // We handled Scale==0 above so all the bits to examine is in Hi. 
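  // Worked example (illustrative): for i8 with Scale == 4 the widened product
  // is 16 bits and the final value is its bits [11:4].  There is positive
  // overflow iff Hi (bits [15:8]) is greater than 0b00000111 and negative
  // overflow iff Hi is less than 0b11111000 (both as signed i8), which is
  // exactly what the two select_cc checks below test.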
7513 7514 // Saturate to max if ((Hi >> (Scale - 1)) > 0), 7515 // which is the same as if (Hi > (1 << (Scale - 1)) - 1) 7516 SDValue LowMask = DAG.getConstant(APInt::getLowBitsSet(VTSize, Scale - 1), 7517 dl, VT); 7518 Result = DAG.getSelectCC(dl, Hi, LowMask, SatMax, Result, ISD::SETGT); 7519 // Saturate to min if (Hi >> (Scale - 1)) < -1), 7520 // which is the same as if (HI < (-1 << (Scale - 1)) 7521 SDValue HighMask = 7522 DAG.getConstant(APInt::getHighBitsSet(VTSize, VTSize - Scale + 1), 7523 dl, VT); 7524 Result = DAG.getSelectCC(dl, Hi, HighMask, SatMin, Result, ISD::SETLT); 7525 return Result; 7526 } 7527 7528 SDValue 7529 TargetLowering::expandFixedPointDiv(unsigned Opcode, const SDLoc &dl, 7530 SDValue LHS, SDValue RHS, 7531 unsigned Scale, SelectionDAG &DAG) const { 7532 assert((Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT || 7533 Opcode == ISD::UDIVFIX || Opcode == ISD::UDIVFIXSAT) && 7534 "Expected a fixed point division opcode"); 7535 7536 EVT VT = LHS.getValueType(); 7537 bool Signed = Opcode == ISD::SDIVFIX || Opcode == ISD::SDIVFIXSAT; 7538 bool Saturating = Opcode == ISD::SDIVFIXSAT || Opcode == ISD::UDIVFIXSAT; 7539 EVT BoolVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 7540 7541 // If there is enough room in the type to upscale the LHS or downscale the 7542 // RHS before the division, we can perform it in this type without having to 7543 // resize. For signed operations, the LHS headroom is the number of 7544 // redundant sign bits, and for unsigned ones it is the number of zeroes. 7545 // The headroom for the RHS is the number of trailing zeroes. 7546 unsigned LHSLead = Signed ? DAG.ComputeNumSignBits(LHS) - 1 7547 : DAG.computeKnownBits(LHS).countMinLeadingZeros(); 7548 unsigned RHSTrail = DAG.computeKnownBits(RHS).countMinTrailingZeros(); 7549 7550 // For signed saturating operations, we need to be able to detect true integer 7551 // division overflow; that is, when you have MIN / -EPS. However, this 7552 // is undefined behavior and if we emit divisions that could take such 7553 // values it may cause undesired behavior (arithmetic exceptions on x86, for 7554 // example). 7555 // Avoid this by requiring an extra bit so that we never get this case. 7556 // FIXME: This is a bit unfortunate as it means that for an 8-bit 7-scale 7557 // signed saturating division, we need to emit a whopping 32-bit division. 7558 if (LHSLead + RHSTrail < Scale + (unsigned)(Saturating && Signed)) 7559 return SDValue(); 7560 7561 unsigned LHSShift = std::min(LHSLead, Scale); 7562 unsigned RHSShift = Scale - LHSShift; 7563 7564 // At this point, we know that if we shift the LHS up by LHSShift and the 7565 // RHS down by RHSShift, we can emit a regular division with a final scaling 7566 // factor of Scale. 7567 7568 EVT ShiftTy = getShiftAmountTy(VT, DAG.getDataLayout()); 7569 if (LHSShift) 7570 LHS = DAG.getNode(ISD::SHL, dl, VT, LHS, 7571 DAG.getConstant(LHSShift, dl, ShiftTy)); 7572 if (RHSShift) 7573 RHS = DAG.getNode(Signed ? ISD::SRA : ISD::SRL, dl, VT, RHS, 7574 DAG.getConstant(RHSShift, dl, ShiftTy)); 7575 7576 SDValue Quot; 7577 if (Signed) { 7578 // For signed operations, if the resulting quotient is negative and the 7579 // remainder is nonzero, subtract 1 from the quotient to round towards 7580 // negative infinity. 7581 SDValue Rem; 7582 // FIXME: Ideally we would always produce an SDIVREM here, but if the 7583 // type isn't legal, SDIVREM cannot be expanded. 
    // There is no reason why
    // we couldn't just form a libcall, but the type legalizer doesn't do it.
    if (isTypeLegal(VT) &&
        isOperationLegalOrCustom(ISD::SDIVREM, VT)) {
      Quot = DAG.getNode(ISD::SDIVREM, dl,
                         DAG.getVTList(VT, VT),
                         LHS, RHS);
      Rem = Quot.getValue(1);
      Quot = Quot.getValue(0);
    } else {
      Quot = DAG.getNode(ISD::SDIV, dl, VT,
                         LHS, RHS);
      Rem = DAG.getNode(ISD::SREM, dl, VT,
                        LHS, RHS);
    }
    SDValue Zero = DAG.getConstant(0, dl, VT);
    SDValue RemNonZero = DAG.getSetCC(dl, BoolVT, Rem, Zero, ISD::SETNE);
    SDValue LHSNeg = DAG.getSetCC(dl, BoolVT, LHS, Zero, ISD::SETLT);
    SDValue RHSNeg = DAG.getSetCC(dl, BoolVT, RHS, Zero, ISD::SETLT);
    SDValue QuotNeg = DAG.getNode(ISD::XOR, dl, BoolVT, LHSNeg, RHSNeg);
    SDValue Sub1 = DAG.getNode(ISD::SUB, dl, VT, Quot,
                               DAG.getConstant(1, dl, VT));
    Quot = DAG.getSelect(dl, VT,
                         DAG.getNode(ISD::AND, dl, BoolVT, RemNonZero, QuotNeg),
                         Sub1, Quot);
  } else
    Quot = DAG.getNode(ISD::UDIV, dl, VT,
                       LHS, RHS);

  return Quot;
}

void TargetLowering::expandUADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::UADDO;

  // If ADD/SUBCARRY is legal, use that instead.
  unsigned OpcCarry = IsAdd ? ISD::ADDCARRY : ISD::SUBCARRY;
  if (isOperationLegalOrCustom(OpcCarry, Node->getValueType(0))) {
    SDValue CarryIn = DAG.getConstant(0, dl, Node->getValueType(1));
    SDValue NodeCarry = DAG.getNode(OpcCarry, dl, Node->getVTList(),
                                    { LHS, RHS, CarryIn });
    Result = SDValue(NodeCarry.getNode(), 0);
    Overflow = SDValue(NodeCarry.getNode(), 1);
    return;
  }

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT SetCCType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));
  ISD::CondCode CC = IsAdd ? ISD::SETULT : ISD::SETUGT;
  SDValue SetCC = DAG.getSetCC(dl, SetCCType, Result, LHS, CC);
  Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
}

void TargetLowering::expandSADDSUBO(
    SDNode *Node, SDValue &Result, SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool IsAdd = Node->getOpcode() == ISD::SADDO;

  Result = DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, dl,
                       LHS.getValueType(), LHS, RHS);

  EVT ResultType = Node->getValueType(1);
  EVT OType = getSetCCResultType(
      DAG.getDataLayout(), *DAG.getContext(), Node->getValueType(0));

  // If SADDSAT/SSUBSAT is legal, compare results to detect overflow.
  unsigned OpcSat = IsAdd ?
      ISD::SADDSAT : ISD::SSUBSAT;
  if (isOperationLegalOrCustom(OpcSat, LHS.getValueType())) {
    SDValue Sat = DAG.getNode(OpcSat, dl, LHS.getValueType(), LHS, RHS);
    SDValue SetCC = DAG.getSetCC(dl, OType, Result, Sat, ISD::SETNE);
    Overflow = DAG.getBoolExtOrTrunc(SetCC, dl, ResultType, ResultType);
    return;
  }

  SDValue Zero = DAG.getConstant(0, dl, LHS.getValueType());

  // For an addition, the result should be less than one of the operands (LHS)
  // if and only if the other operand (RHS) is negative, otherwise there will
  // be overflow.
  // For a subtraction, the result should be less than one of the operands
  // (LHS) if and only if the other operand (RHS) is (non-zero) positive,
  // otherwise there will be overflow.
  SDValue ResultLowerThanLHS = DAG.getSetCC(dl, OType, Result, LHS, ISD::SETLT);
  SDValue ConditionRHS =
      DAG.getSetCC(dl, OType, RHS, Zero, IsAdd ? ISD::SETLT : ISD::SETGT);

  Overflow = DAG.getBoolExtOrTrunc(
      DAG.getNode(ISD::XOR, dl, OType, ConditionRHS, ResultLowerThanLHS), dl,
      ResultType, ResultType);
}

bool TargetLowering::expandMULO(SDNode *Node, SDValue &Result,
                                SDValue &Overflow, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  EVT VT = Node->getValueType(0);
  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
  SDValue LHS = Node->getOperand(0);
  SDValue RHS = Node->getOperand(1);
  bool isSigned = Node->getOpcode() == ISD::SMULO;

  // For power-of-two multiplications we can use a simpler shift expansion.
  if (ConstantSDNode *RHSC = isConstOrConstSplat(RHS)) {
    const APInt &C = RHSC->getAPIntValue();
    // mulo(X, 1 << S) -> { X << S, (X << S) >> S != X }
    if (C.isPowerOf2()) {
      // smulo(x, signed_min) is the same as umulo(x, signed_min).
      bool UseArithShift = isSigned && !C.isMinSignedValue();
      EVT ShiftAmtTy = getShiftAmountTy(VT, DAG.getDataLayout());
      SDValue ShiftAmt = DAG.getConstant(C.logBase2(), dl, ShiftAmtTy);
      Result = DAG.getNode(ISD::SHL, dl, VT, LHS, ShiftAmt);
      Overflow = DAG.getSetCC(dl, SetCCVT,
                              DAG.getNode(UseArithShift ?
                                              ISD::SRA : ISD::SRL,
                                          dl, VT, Result, ShiftAmt),
                              LHS, ISD::SETNE);
      return true;
    }
  }

  EVT WideVT = EVT::getIntegerVT(*DAG.getContext(), VT.getScalarSizeInBits() * 2);
  if (VT.isVector())
    WideVT = EVT::getVectorVT(*DAG.getContext(), WideVT,
                              VT.getVectorNumElements());

  SDValue BottomHalf;
  SDValue TopHalf;
  static const unsigned Ops[2][3] =
      { { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
        { ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
  if (isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
    BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
    TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
  } else if (isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
    BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
                             RHS);
    TopHalf = BottomHalf.getValue(1);
  } else if (isTypeLegal(WideVT)) {
    LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
    RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
    BottomHalf = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits(), dl,
        getShiftAmountTy(WideVT, DAG.getDataLayout()));
    TopHalf = DAG.getNode(ISD::TRUNCATE, dl, VT,
                          DAG.getNode(ISD::SRL, dl, WideVT, Mul, ShiftAmt));
  } else {
    if (VT.isVector())
      return false;

    // We can fall back to a libcall with an illegal type for the MUL if we
    // have a libcall big enough.
    // Also, we can fall back to a division in some cases, but that's a big
    // performance hit in the general case.
    RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
    if (WideVT == MVT::i16)
      LC = RTLIB::MUL_I16;
    else if (WideVT == MVT::i32)
      LC = RTLIB::MUL_I32;
    else if (WideVT == MVT::i64)
      LC = RTLIB::MUL_I64;
    else if (WideVT == MVT::i128)
      LC = RTLIB::MUL_I128;
    assert(LC != RTLIB::UNKNOWN_LIBCALL && "Cannot expand this operation!");

    SDValue HiLHS;
    SDValue HiRHS;
    if (isSigned) {
      // The high part is obtained by SRA'ing all but one of the bits of the
      // low part.
      unsigned LoSize = VT.getSizeInBits();
      HiLHS =
          DAG.getNode(ISD::SRA, dl, VT, LHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
      HiRHS =
          DAG.getNode(ISD::SRA, dl, VT, RHS,
                      DAG.getConstant(LoSize - 1, dl,
                                      getPointerTy(DAG.getDataLayout())));
    } else {
      HiLHS = DAG.getConstant(0, dl, VT);
      HiRHS = DAG.getConstant(0, dl, VT);
    }

    // Here we're passing the 2 arguments explicitly as 4 arguments that are
    // pre-lowered to the correct types. This all depends upon WideVT not
    // being a legal type for the architecture and thus having to be split
    // into two arguments.
    SDValue Ret;
    TargetLowering::MakeLibCallOptions CallOptions;
    CallOptions.setSExt(isSigned);
    CallOptions.setIsPostTypeLegalization(true);
    if (shouldSplitFunctionArgumentsAsLittleEndian(DAG.getDataLayout())) {
      // Halves of WideVT are packed into registers in different order
      // depending on platform endianness. This is usually handled by
      // the C calling convention, but we can't defer to it in
      // the legalizer.
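      // Illustrative note (an assumption about typical ABIs, not taken from
      // the original comments): when expanding an i32 [su]mulo through the
      // i64 MUL_I64 libcall on a 32-bit target, each i64 argument occupies
      // two i32 slots; a little-endian ABI expects {Lo, Hi} per argument
      // (hence {LHS, HiLHS, RHS, HiRHS}), while a big-endian ABI expects
      // {Hi, Lo}.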
      SDValue Args[] = { LHS, HiLHS, RHS, HiRHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    } else {
      SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
      Ret = makeLibCall(DAG, LC, WideVT, Args, CallOptions, dl).first;
    }
    assert(Ret.getOpcode() == ISD::MERGE_VALUES &&
           "Ret value is a collection of constituent nodes holding result.");
    if (DAG.getDataLayout().isLittleEndian()) {
      // As with the arguments above, the two halves of the wide result come
      // back in endianness-dependent order.
      BottomHalf = Ret.getOperand(0);
      TopHalf = Ret.getOperand(1);
    } else {
      BottomHalf = Ret.getOperand(1);
      TopHalf = Ret.getOperand(0);
    }
  }

  Result = BottomHalf;
  if (isSigned) {
    SDValue ShiftAmt = DAG.getConstant(
        VT.getScalarSizeInBits() - 1, dl,
        getShiftAmountTy(BottomHalf.getValueType(), DAG.getDataLayout()));
    SDValue Sign = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf, Sign, ISD::SETNE);
  } else {
    Overflow = DAG.getSetCC(dl, SetCCVT, TopHalf,
                            DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // Truncate the result if SetCC returns a larger type than needed.
  EVT RType = Node->getValueType(1);
  if (RType.getSizeInBits() < Overflow.getValueSizeInBits())
    Overflow = DAG.getNode(ISD::TRUNCATE, dl, RType, Overflow);

  assert(RType.getSizeInBits() == Overflow.getValueSizeInBits() &&
         "Unexpected result type for S/UMULO legalization");
  return true;
}

SDValue TargetLowering::expandVecReduce(SDNode *Node, SelectionDAG &DAG) const {
  SDLoc dl(Node);
  bool NoNaN = Node->getFlags().hasNoNaNs();
  unsigned BaseOpcode = 0;
  switch (Node->getOpcode()) {
  default: llvm_unreachable("Expected VECREDUCE opcode");
  case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
  case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
  case ISD::VECREDUCE_ADD:  BaseOpcode = ISD::ADD; break;
  case ISD::VECREDUCE_MUL:  BaseOpcode = ISD::MUL; break;
  case ISD::VECREDUCE_AND:  BaseOpcode = ISD::AND; break;
  case ISD::VECREDUCE_OR:   BaseOpcode = ISD::OR; break;
  case ISD::VECREDUCE_XOR:  BaseOpcode = ISD::XOR; break;
  case ISD::VECREDUCE_SMAX: BaseOpcode = ISD::SMAX; break;
  case ISD::VECREDUCE_SMIN: BaseOpcode = ISD::SMIN; break;
  case ISD::VECREDUCE_UMAX: BaseOpcode = ISD::UMAX; break;
  case ISD::VECREDUCE_UMIN: BaseOpcode = ISD::UMIN; break;
  case ISD::VECREDUCE_FMAX:
    BaseOpcode = NoNaN ? ISD::FMAXNUM : ISD::FMAXIMUM;
    break;
  case ISD::VECREDUCE_FMIN:
    BaseOpcode = NoNaN ? ISD::FMINNUM : ISD::FMINIMUM;
    break;
  }

  SDValue Op = Node->getOperand(0);
  EVT VT = Op.getValueType();

  // Try to use a shuffle reduction for power of two vectors.
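  // Sketch of the reduction below (illustrative; assumes the base operation
  // stays legal at each narrower type and that single-element vector ops are
  // not legal): a VECREDUCE_ADD of v8i32 is halved to v4i32 and then v2i32
  // with one vector add per step, and the remaining two lanes are combined
  // with a single scalar add, i.e. roughly log2(N) steps instead of N - 1
  // sequential scalar adds.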
  if (VT.isPow2VectorType()) {
    while (VT.getVectorNumElements() > 1) {
      EVT HalfVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
      if (!isOperationLegalOrCustom(BaseOpcode, HalfVT))
        break;

      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Op, dl);
      Op = DAG.getNode(BaseOpcode, dl, HalfVT, Lo, Hi);
      VT = HalfVT;
    }
  }

  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 8> Ops;
  DAG.ExtractVectorElements(Op, Ops, 0, NumElts);

  SDValue Res = Ops[0];
  for (unsigned i = 1; i < NumElts; i++)
    Res = DAG.getNode(BaseOpcode, dl, EltVT, Res, Ops[i], Node->getFlags());

  // Result type may be wider than element type.
  if (EltVT != Node->getValueType(0))
    Res = DAG.getNode(ISD::ANY_EXTEND, dl, Node->getValueType(0), Res);
  return Res;
}

bool TargetLowering::expandREM(SDNode *Node, SDValue &Result,
                               SelectionDAG &DAG) const {
  EVT VT = Node->getValueType(0);
  SDLoc dl(Node);
  bool isSigned = Node->getOpcode() == ISD::SREM;
  unsigned DivOpc = isSigned ? ISD::SDIV : ISD::UDIV;
  unsigned DivRemOpc = isSigned ? ISD::SDIVREM : ISD::UDIVREM;
  SDValue Dividend = Node->getOperand(0);
  SDValue Divisor = Node->getOperand(1);
  if (isOperationLegalOrCustom(DivRemOpc, VT)) {
    SDVTList VTs = DAG.getVTList(VT, VT);
    Result = DAG.getNode(DivRemOpc, dl, VTs, Dividend, Divisor).getValue(1);
    return true;
  } else if (isOperationLegalOrCustom(DivOpc, VT)) {
    // X % Y -> X - (X / Y) * Y
    SDValue Divide = DAG.getNode(DivOpc, dl, VT, Dividend, Divisor);
    SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Divide, Divisor);
    Result = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
    return true;
  }
  return false;
}